Code dump: prometheus/prometheus @ commit 66bdc88013e6c6098da7026ce828d3b33235d527 (license: Apache-2.0, language: Go).

// ---------------------------------------------------------------------------
// model/textparse/promlex.l.go
// https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/textparse/promlex.l.go
// ---------------------------------------------------------------------------

// Code generated by golex. DO NOT EDIT.
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"fmt"
)
const (
sInit = iota
sComment
sMeta1
sMeta2
sLabels
sLValue
sValue
sTimestamp
sExemplar
sEValue
sETimestamp
)
// Lex is called by the parser generated by "go tool yacc" to obtain each
// token. The method is opened before the matching rules block and closed at
// the end of the file.
func (l *promlexer) Lex() token {
if l.i >= len(l.b) {
return tEOF
}
c := l.b[l.i]
l.start = l.i
yystate0:
switch yyt := l.state; yyt {
default:
panic(fmt.Errorf(`invalid start condition %d`, yyt))
case 0: // start condition: INITIAL
goto yystart1
case 1: // start condition: sComment
goto yystart9
case 2: // start condition: sMeta1
goto yystart20
case 3: // start condition: sMeta2
goto yystart25
case 4: // start condition: sLabels
goto yystart28
case 5: // start condition: sLValue
goto yystart36
case 6: // start condition: sValue
goto yystart40
case 7: // start condition: sTimestamp
goto yystart43
}
yystate1:
c = l.next()
yystart1:
switch {
default:
goto yyabort
case c == '#':
goto yystate5
case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate7
case c == '\n':
goto yystate4
case c == '\t' || c == ' ':
goto yystate3
case c == '\x00':
goto yystate2
case c == '{':
goto yystate8
}
yystate2:
c = l.next()
goto yyrule1
yystate3:
c = l.next()
switch c {
default:
goto yyrule3
case '\t', ' ':
goto yystate3
}
yystate4:
c = l.next()
goto yyrule2
yystate5:
c = l.next()
switch c {
default:
goto yyrule5
case '\t', ' ':
goto yystate6
}
yystate6:
c = l.next()
switch c {
default:
goto yyrule4
case '\t', ' ':
goto yystate6
}
yystate7:
c = l.next()
switch {
default:
goto yyrule11
case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate7
}
yystate8:
c = l.next()
goto yyrule13
yystate9:
c = l.next()
yystart9:
switch c {
default:
goto yyabort
case 'H':
goto yystate10
case 'T':
goto yystate15
case '\t', ' ':
goto yystate3
}
yystate10:
c = l.next()
switch c {
default:
goto yyabort
case 'E':
goto yystate11
}
yystate11:
c = l.next()
switch c {
default:
goto yyabort
case 'L':
goto yystate12
}
yystate12:
c = l.next()
switch c {
default:
goto yyabort
case 'P':
goto yystate13
}
yystate13:
c = l.next()
switch c {
default:
goto yyabort
case '\t', ' ':
goto yystate14
}
yystate14:
c = l.next()
switch c {
default:
goto yyrule6
case '\t', ' ':
goto yystate14
}
yystate15:
c = l.next()
switch c {
default:
goto yyabort
case 'Y':
goto yystate16
}
yystate16:
c = l.next()
switch c {
default:
goto yyabort
case 'P':
goto yystate17
}
yystate17:
c = l.next()
switch c {
default:
goto yyabort
case 'E':
goto yystate18
}
yystate18:
c = l.next()
switch c {
default:
goto yyabort
case '\t', ' ':
goto yystate19
}
yystate19:
c = l.next()
switch c {
default:
goto yyrule7
case '\t', ' ':
goto yystate19
}
yystate20:
c = l.next()
yystart20:
switch {
default:
goto yyabort
case c == '"':
goto yystate21
case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate24
case c == '\t' || c == ' ':
goto yystate3
}
yystate21:
c = l.next()
switch {
default:
goto yyabort
case c == '"':
goto yystate22
case c == '\\':
goto yystate23
case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
goto yystate21
}
yystate22:
c = l.next()
goto yyrule8
yystate23:
c = l.next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
goto yystate21
}
yystate24:
c = l.next()
switch {
default:
goto yyrule9
case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate24
}
yystate25:
c = l.next()
yystart25:
switch {
default:
goto yyrule10
case c == '\t' || c == ' ':
goto yystate27
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
goto yystate26
}
yystate26:
c = l.next()
switch {
default:
goto yyrule10
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
goto yystate26
}
yystate27:
c = l.next()
switch {
default:
goto yyrule3
case c == '\t' || c == ' ':
goto yystate27
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
goto yystate26
}
yystate28:
c = l.next()
yystart28:
switch {
default:
goto yyabort
case c == '"':
goto yystate29
case c == ',':
goto yystate32
case c == '=':
goto yystate33
case c == '\t' || c == ' ':
goto yystate3
case c == '}':
goto yystate35
case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate34
}
yystate29:
c = l.next()
switch {
default:
goto yyabort
case c == '"':
goto yystate30
case c == '\\':
goto yystate31
case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
goto yystate29
}
yystate30:
c = l.next()
goto yyrule15
yystate31:
c = l.next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
goto yystate29
}
yystate32:
c = l.next()
goto yyrule18
yystate33:
c = l.next()
goto yyrule17
yystate34:
c = l.next()
switch {
default:
goto yyrule14
case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate34
}
yystate35:
c = l.next()
goto yyrule16
yystate36:
c = l.next()
yystart36:
switch c {
default:
goto yyabort
case '"':
goto yystate37
case '\t', ' ':
goto yystate3
}
yystate37:
c = l.next()
switch {
default:
goto yyabort
case c == '"':
goto yystate38
case c == '\\':
goto yystate39
case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
goto yystate37
}
yystate38:
c = l.next()
goto yyrule19
yystate39:
c = l.next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
goto yystate37
}
yystate40:
c = l.next()
yystart40:
switch {
default:
goto yyabort
case c == '\t' || c == ' ':
goto yystate3
case c == '{':
goto yystate42
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
goto yystate41
}
yystate41:
c = l.next()
switch {
default:
goto yyrule20
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
goto yystate41
}
yystate42:
c = l.next()
goto yyrule12
yystate43:
c = l.next()
yystart43:
switch {
default:
goto yyabort
case c == '\n':
goto yystate44
case c == '\t' || c == ' ':
goto yystate3
case c >= '0' && c <= '9':
goto yystate45
}
yystate44:
c = l.next()
goto yyrule22
yystate45:
c = l.next()
switch {
default:
goto yyrule21
case c >= '0' && c <= '9':
goto yystate45
}
yyrule1: // \0
{
return tEOF
}
yyrule2: // \n
{
l.state = sInit
return tLinebreak
}
yyrule3: // [ \t]+
{
return tWhitespace
}
yyrule4: // #[ \t]+
{
l.state = sComment
goto yystate0
}
yyrule5: // #
{
return l.consumeComment()
}
yyrule6: // HELP[\t ]+
{
l.state = sMeta1
return tHelp
}
yyrule7: // TYPE[\t ]+
{
l.state = sMeta1
return tType
}
yyrule8: // \"(\\.|[^\\"])*\"
{
l.state = sMeta2
return tMName
}
yyrule9: // {M}({M}|{D})*
{
l.state = sMeta2
return tMName
}
yyrule10: // {C}*
{
l.state = sInit
return tText
}
yyrule11: // {M}({M}|{D})*
{
l.state = sValue
return tMName
}
yyrule12: // \{
{
l.state = sLabels
return tBraceOpen
}
yyrule13: // \{
{
l.state = sLabels
return tBraceOpen
}
yyrule14: // {L}({L}|{D})*
{
return tLName
}
yyrule15: // \"(\\.|[^\\"])*\"
{
l.state = sLabels
return tQString
}
yyrule16: // \}
{
l.state = sValue
return tBraceClose
}
yyrule17: // =
{
l.state = sLValue
return tEqual
}
yyrule18: // ,
{
return tComma
}
yyrule19: // \"(\\.|[^\\"])*\"
{
l.state = sLabels
return tLValue
}
yyrule20: // [^{ \t\n]+
{
l.state = sTimestamp
return tValue
}
yyrule21: // {D}+
{
return tTimestamp
}
yyrule22: // \n
if true { // avoid go vet determining the below panic will not be reached
l.state = sInit
return tLinebreak
}
panic("unreachable")
yyabort: // no lexem recognized
// silence unused label errors for build and satisfy go vet reachability analysis
{
if false {
goto yyabort
}
if false {
goto yystate0
}
if false {
goto yystate1
}
if false {
goto yystate9
}
if false {
goto yystate20
}
if false {
goto yystate25
}
if false {
goto yystate28
}
if false {
goto yystate36
}
if false {
goto yystate40
}
if false {
goto yystate43
}
}
// Workaround to gobble up comments that started with a HELP or TYPE
// prefix. We just consume all characters until we reach a newline.
// This saves us from adding disproportionate complexity to the parser.
if l.state == sComment {
return l.consumeComment()
}
return tInvalid
}
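// Editor's note (illustration, not part of the generated code): for an
// exposition line such as
//
//	http_requests_total{code="200"} 1027 1395066363000
//
// the state machine above emits, ignoring tWhitespace tokens: tMName
// (rule 11, to sValue), tBraceOpen (rule 12, to sLabels), tLName (rule 14),
// tEqual (rule 17, to sLValue), tLValue (rule 19, back to sLabels),
// tBraceClose (rule 16, to sValue), tValue (rule 20, to sTimestamp),
// tTimestamp (rule 21), and tLinebreak (rule 22, back to sInit).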
func (l *promlexer) consumeComment() token {
for c := l.cur(); ; c = l.next() {
switch c {
case 0:
return tEOF
case '\n':
l.state = sInit
return tComment
}
}
}

// ---------------------------------------------------------------------------
// model/textparse/benchmark_test.go
// https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/textparse/benchmark_test.go
// ---------------------------------------------------------------------------

// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
)
// The BenchmarkParse... set of benchmarks analyzes the efficiency of parsing
// various datasets with different parsers. It mimics how scrape/scrape.go#append
// uses parsers and allows comparison with expfmt decoders where applicable.
//
// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated
// Series, Series+Metrics with and without reuse, Series+ST. Those cases are sometimes
// good to know if you are working on a certain optimization, but it does not
// make sense to persist such cases for everybody (e.g. for CI one day).
// For local iteration, feel free to adjust cases/comment out code etc.
//
// NOTE(bwplotka): These benchmarks are purposely categorized per data set,
// to avoid the temptation to assess "what parser (OM, proto, prom) is the
// fastest, in general" here, given that not every parser supports every
// data set type.
// Use scrape.BenchmarkScrapeLoopAppend if you want one benchmark comparing parsers fairly.
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkParsePromText' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkParsePromText(b *testing.B) {
data := readTestdataFile(b, "alltypes.237mfs.prom.txt")
for _, parser := range []string{
"promtext",
"omtext", // Compare how omtext parser deals with Prometheus text format.
"expfmt-promtext",
} {
b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) {
if strings.HasPrefix(parser, "expfmt-") {
benchExpFmt(b, data, parser)
} else {
benchParse(b, data, parser)
}
})
}
}
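// Editor's note (assumption; the original file does not prescribe a
// comparison tool): the `tee ${bench}.txt` capture above pairs naturally
// with benchstat, e.g. re-run with bench=v2 after a change and then:
//
//	benchstat v1.txt v2.txt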
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkParsePromText_NoMeta' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkParsePromText_NoMeta(b *testing.B) {
data := readTestdataFile(b, "alltypes.237mfs.nometa.prom.txt")
for _, parser := range []string{
"promtext",
"expfmt-promtext",
} {
b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) {
if strings.HasPrefix(parser, "expfmt-") {
benchExpFmt(b, data, parser)
} else {
benchParse(b, data, parser)
}
})
}
}
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkParseOMText' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkParseOMText(b *testing.B) {
data := readTestdataFile(b, "alltypes.5mfs.om.txt")
// TODO(bwplotka): Add comparison with expfmt.TypeOpenMetrics once expfmt
// support OM exemplars, see https://github.com/prometheus/common/issues/703.
benchParse(b, data, "omtext")
}
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkParsePromProto' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkParsePromProto(b *testing.B) {
data := createTestProtoBuf(b).Bytes()
// TODO(bwplotka): Add comparison with expfmt.TypeProtoDelim once expfmt
// support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430.
benchParse(b, data, "promproto")
}
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkParseOpenMetricsNHCB' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkParseOpenMetricsNHCB(b *testing.B) {
data := readTestdataFile(b, "1histogram.om.txt")
for _, parser := range []string{
"omtext", // Measure OM parser baseline for histograms.
"omtext_with_nhcb", // Measure NHCB over OM parser.
} {
b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) {
benchParse(b, data, parser)
})
}
}
func benchParse(b *testing.B, data []byte, parser string) {
type newParser func([]byte, *labels.SymbolTable) Parser
var newParserFn newParser
switch parser {
case "promtext":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewPromParser(b, st, false)
}
case "promproto":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewProtobufParser(b, false, true, false, false, st)
}
case "omtext":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewOpenMetricsParser(b, st, WithOMParserSTSeriesSkipped())
}
case "omtext_with_nhcb":
newParserFn = func(buf []byte, st *labels.SymbolTable) Parser {
p, err := New(buf, "application/openmetrics-text", st, ParserOptions{ConvertClassicHistogramsToNHCB: true})
require.NoError(b, err)
return p
}
default:
b.Fatal("unknown parser", parser)
}
var (
res labels.Labels
e exemplar.Exemplar
)
b.SetBytes(int64(len(data)))
b.ReportAllocs()
st := labels.NewSymbolTable()
for b.Loop() {
p := newParserFn(data, st)
Inner:
for {
t, err := p.Next()
switch t {
case EntryInvalid:
if errors.Is(err, io.EOF) {
break Inner
}
b.Fatal(err)
case EntryType:
_, _ = p.Type()
continue
case EntryHelp:
_, _ = p.Help()
continue
case EntryUnit:
_, _ = p.Unit()
continue
case EntryComment:
continue
case EntryHistogram:
_, _, _, _ = p.Histogram()
case EntrySeries:
_, _, _ = p.Series()
default:
b.Fatal("not implemented entry", t)
}
p.Labels(&res)
_ = p.StartTimestamp()
for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
}
}
}
}
func benchExpFmt(b *testing.B, data []byte, expFormatTypeStr string) {
expfmtFormatType := expfmt.TypeUnknown
switch expFormatTypeStr {
case "expfmt-promtext":
expfmtFormatType = expfmt.TypeProtoText
case "expfmt-promproto":
expfmtFormatType = expfmt.TypeProtoDelim
case "expfmt-omtext":
expfmtFormatType = expfmt.TypeOpenMetrics
default:
b.Fatal("unknown expfmt format type", expFormatTypeStr)
}
b.SetBytes(int64(len(data)))
b.ReportAllocs()
for b.Loop() {
decSamples := make(model.Vector, 0, 50)
sdec := expfmt.SampleDecoder{
Dec: expfmt.NewDecoder(bytes.NewReader(data), expfmt.NewFormat(expfmtFormatType)),
Opts: &expfmt.DecodeOptions{
Timestamp: model.TimeFromUnixNano(0),
},
}
for {
if err := sdec.Decode(&decSamples); err != nil {
if errors.Is(err, io.EOF) {
break
}
b.Fatal(err)
}
decSamples = decSamples[:0]
}
}
}
func readTestdataFile(tb testing.TB, file string) []byte {
tb.Helper()
f, err := os.Open(filepath.Join("testdata", file))
require.NoError(tb, err)
tb.Cleanup(func() {
_ = f.Close()
})
buf, err := io.ReadAll(f)
require.NoError(tb, err)
return buf
}
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkStartTimestampPromProto' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkStartTimestampPromProto(b *testing.B) {
data := createTestProtoBuf(b).Bytes()
st := labels.NewSymbolTable()
p := NewProtobufParser(data, false, true, false, false, st)
found := false
Inner:
for {
t, err := p.Next()
switch t {
case EntryInvalid:
b.Fatal(err)
case EntryType:
m, _ := p.Type()
if string(m) == "go_memstats_alloc_bytes_total" {
found = true
break Inner
}
// Parser impl requires this (bug?)
case EntryHistogram:
_, _, _, _ = p.Histogram()
case EntrySeries:
_, _, _ = p.Series()
}
}
require.True(b, found)
b.Run("case=no-ct", func(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
if p.StartTimestamp() != 0 {
b.Fatal("should be nil")
}
}
})
found = false
Inner2:
for {
t, err := p.Next()
switch t {
case EntryInvalid:
b.Fatal(err)
case EntryType:
m, _ := p.Type()
if string(m) == "test_counter_with_createdtimestamp" {
found = true
break Inner2
}
case EntryHistogram:
_, _, _, _ = p.Histogram()
case EntrySeries:
_, _, _ = p.Series()
}
}
require.True(b, found)
b.Run("case=ct", func(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
if p.StartTimestamp() == 0 {
b.Fatal("should be not nil")
}
}
})
}

// ---------------------------------------------------------------------------
// model/textparse/protobufparse.go
// https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/textparse/protobufparse.go
// ---------------------------------------------------------------------------

// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"bytes"
"errors"
"fmt"
"io"
"math"
"unicode/utf8"
"github.com/gogo/protobuf/types"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/util/convertnhcb"
)
// ProtobufParser parses the old Prometheus protobuf format and presents it
// as the text-style textparse.Parser interface.
//
// It uses a tailored streaming protobuf dto.MetricStreamingDecoder that
// reuses internal protobuf structs and allows direct unmarshalling to Prometheus
// types like labels.
type ProtobufParser struct {
dec *dto.MetricStreamingDecoder
// Used both for the series string returned by Series and Histogram, and
// for the metric family name returned by Type, Unit and Help.
entryBytes *bytes.Buffer
lset labels.Labels
builder labels.ScratchBuilder // Held here to reduce allocations when building Labels.
// fieldPos is the position within a Summary or (legacy) Histogram. -2
// is the count. -1 is the sum. Otherwise, it is the index within
// quantiles/buckets.
fieldPos int
fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
// exemplarPos is the position within the exemplars slice of a native histogram.
exemplarPos int
// exemplarReturned is set to true each time an exemplar has been
// returned, and set back to false upon each Next() call.
exemplarReturned bool
// state is marked by the entry we are processing. EntryInvalid implies
// that we have to decode the next MetricDescriptor.
state Entry
// Whether to completely ignore any native parts of histograms.
ignoreNativeHistograms bool
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
// Whether to add type and unit labels.
enableTypeAndUnitLabels bool
// Whether to convert classic histograms to native histograms with custom buckets.
convertClassicHistogramsToNHCB bool
// Reusable classic to NHCB converter.
tmpNHCB convertnhcb.TempHistogram
// We need to preload NHCB since we cannot do error handling in Histogram().
nhcbH *histogram.Histogram
nhcbFH *histogram.FloatHistogram
}
// NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(
b []byte,
ignoreNativeHistograms, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels bool,
st *labels.SymbolTable,
) Parser {
builder := labels.NewScratchBuilderWithSymbolTable(st, 16)
builder.SetUnsafeAdd(true)
return &ProtobufParser{
dec: dto.NewMetricStreamingDecoder(b),
entryBytes: &bytes.Buffer{},
builder: builder,
state: EntryInvalid,
ignoreNativeHistograms: ignoreNativeHistograms,
parseClassicHistograms: parseClassicHistograms,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
convertClassicHistogramsToNHCB: convertClassicHistogramsToNHCB,
tmpNHCB: convertnhcb.NewTempHistogram(),
}
}
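// Editor's sketch (not part of the original file): a minimal consumption
// loop over the returned Parser, mirroring how the scrape code and the
// benchmarks in this package drive it. `payload` is assumed to hold
// delimited io.prometheus.client.MetricFamily messages.
//
//	p := NewProtobufParser(payload, false, false, false, false, labels.NewSymbolTable())
//	for {
//		entry, err := p.Next()
//		if errors.Is(err, io.EOF) {
//			break // End of input.
//		}
//		if err != nil {
//			return err
//		}
//		switch entry {
//		case EntrySeries:
//			_, ts, v := p.Series() // Series bytes, optional timestamp, float value.
//			_, _ = ts, v
//		case EntryHistogram:
//			_, _, h, fh := p.Histogram() // Exactly one of h/fh is non-nil.
//			_, _ = h, fh
//		}
//	}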
// Series returns the bytes of a series with a simple float64 as a
// value, the timestamp if set, and the value of the current sample.
func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
var (
ts = &p.dec.TimestampMs // To save memory allocations, never nil.
v float64
)
switch p.dec.GetType() {
case dto.MetricType_COUNTER:
v = p.dec.GetCounter().GetValue()
case dto.MetricType_GAUGE:
v = p.dec.GetGauge().GetValue()
case dto.MetricType_UNTYPED:
v = p.dec.GetUntyped().GetValue()
case dto.MetricType_SUMMARY:
s := p.dec.GetSummary()
switch p.fieldPos {
case -2:
v = float64(s.GetSampleCount())
case -1:
v = s.GetSampleSum()
// Need to detect summaries without quantile here.
if len(s.GetQuantile()) == 0 {
p.fieldsDone = true
}
default:
v = s.GetQuantile()[p.fieldPos].GetValue()
}
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
// This should only happen for a classic histogram.
h := p.dec.GetHistogram()
switch p.fieldPos {
case -2:
v = h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount())
}
case -1:
v = h.GetSampleSum()
default:
bb := h.GetBucket()
if p.fieldPos >= len(bb) {
v = h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount())
}
} else {
v = bb[p.fieldPos].GetCumulativeCountFloat()
if v == 0 {
v = float64(bb[p.fieldPos].GetCumulativeCount())
}
}
}
default:
panic("encountered unexpected metric type, this is a bug")
}
if *ts != 0 {
return p.entryBytes.Bytes(), ts, v
}
// TODO(beorn7): We assume here that ts==0 means no timestamp. That's
// not true in general, but proto3 originally has no distinction between
// unset and default. At a later stage, the `optional` keyword was
// (re-)introduced in proto3, but gogo-protobuf never got updated to
// support it. (Note that setting `[(gogoproto.nullable) = true]` for
// the `timestamp_ms` field doesn't help, either.) We plan to migrate
// away from gogo-protobuf to an actively maintained protobuf
// implementation. Once that's done, we can simply use the `optional`
// keyword and check for the unset state explicitly.
return p.entryBytes.Bytes(), nil, v
}
// Histogram returns the bytes of a series with a native histogram as a value,
// the timestamp if set, and the native histogram in the current sample.
//
// The Compact method is called before returning the Histogram (or FloatHistogram).
//
// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0,
// the histogram is parsed and returned as a FloatHistogram and nil is returned
// as the (integer) Histogram return value. Otherwise, it is parsed and returned
// as an (integer) Histogram and nil is returned as the FloatHistogram return
// value.
func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
var (
ts = &p.dec.TimestampMs // To save memory allocations, never nil.
h = p.dec.GetHistogram()
)
if p.ignoreNativeHistograms || !isNativeHistogram(h) {
// This only happens if we have a classic histogram and
// we converted it to NHCB already in Next.
if *ts != 0 {
return p.entryBytes.Bytes(), ts, p.nhcbH, p.nhcbFH
}
return p.entryBytes.Bytes(), nil, p.nhcbH, p.nhcbFH
}
if p.parseClassicHistograms && len(h.GetBucket()) > 0 {
p.redoClassic = true
}
if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
// It is a float histogram.
fh := histogram.FloatHistogram{
Count: h.GetSampleCountFloat(),
Sum: h.GetSampleSum(),
ZeroThreshold: h.GetZeroThreshold(),
ZeroCount: h.GetZeroCountFloat(),
Schema: h.GetSchema(),
// Decoder reuses slices, so we need to copy.
PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())),
PositiveBuckets: make([]float64, len(h.GetPositiveCount())),
NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())),
NegativeBuckets: make([]float64, len(h.GetNegativeCount())),
}
for i, span := range h.GetPositiveSpan() {
fh.PositiveSpans[i].Offset = span.GetOffset()
fh.PositiveSpans[i].Length = span.GetLength()
}
for i, cnt := range h.GetPositiveCount() {
fh.PositiveBuckets[i] = cnt
}
for i, span := range h.GetNegativeSpan() {
fh.NegativeSpans[i].Offset = span.GetOffset()
fh.NegativeSpans[i].Length = span.GetLength()
}
for i, cnt := range h.GetNegativeCount() {
fh.NegativeBuckets[i] = cnt
}
if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
fh.CounterResetHint = histogram.GaugeType
}
fh.Compact(0)
if *ts != 0 {
return p.entryBytes.Bytes(), ts, nil, &fh
}
// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
// general, but proto3 has no distinction between unset and
// default. Need to avoid in the final format.
return p.entryBytes.Bytes(), nil, nil, &fh
}
// TODO(bwplotka): Create sync.Pool for those structs.
sh := histogram.Histogram{
Count: h.GetSampleCount(),
Sum: h.GetSampleSum(),
ZeroThreshold: h.GetZeroThreshold(),
ZeroCount: h.GetZeroCount(),
Schema: h.GetSchema(),
PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())),
PositiveBuckets: make([]int64, len(h.GetPositiveDelta())),
NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())),
NegativeBuckets: make([]int64, len(h.GetNegativeDelta())),
}
for i, span := range h.GetPositiveSpan() {
sh.PositiveSpans[i].Offset = span.GetOffset()
sh.PositiveSpans[i].Length = span.GetLength()
}
for i, cnt := range h.GetPositiveDelta() {
sh.PositiveBuckets[i] = cnt
}
for i, span := range h.GetNegativeSpan() {
sh.NegativeSpans[i].Offset = span.GetOffset()
sh.NegativeSpans[i].Length = span.GetLength()
}
for i, cnt := range h.GetNegativeDelta() {
sh.NegativeBuckets[i] = cnt
}
if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
sh.CounterResetHint = histogram.GaugeType
}
sh.Compact(0)
if *ts != 0 {
return p.entryBytes.Bytes(), ts, &sh, nil
}
return p.entryBytes.Bytes(), nil, &sh, nil
}
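// Editor's note: per the decision above, a proto histogram carrying
// SampleCountFloat > 0 (or ZeroCountFloat > 0) comes back as a
// *FloatHistogram with a nil *Histogram, while one carrying only the
// integer SampleCount comes back as an integer *Histogram with a nil
// *FloatHistogram.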
// Help returns the metric name and help text in the current entry.
// Must only be called after Next returned a help entry.
// The returned byte slices become invalid after the next call to Next.
func (p *ProtobufParser) Help() ([]byte, []byte) {
return p.entryBytes.Bytes(), yoloBytes(p.dec.GetHelp())
}
// Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
func (p *ProtobufParser) Type() ([]byte, model.MetricType) {
n := p.entryBytes.Bytes()
switch p.dec.GetType() {
case dto.MetricType_COUNTER:
return n, model.MetricTypeCounter
case dto.MetricType_GAUGE:
return n, model.MetricTypeGauge
case dto.MetricType_HISTOGRAM:
return n, model.MetricTypeHistogram
case dto.MetricType_GAUGE_HISTOGRAM:
return n, model.MetricTypeGaugeHistogram
case dto.MetricType_SUMMARY:
return n, model.MetricTypeSummary
}
return n, model.MetricTypeUnknown
}
// Unit returns the metric unit in the current entry.
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
func (p *ProtobufParser) Unit() ([]byte, []byte) {
return p.entryBytes.Bytes(), []byte(p.dec.GetUnit())
}
// Comment always returns nil because comments aren't supported by the protobuf
// format.
func (*ProtobufParser) Comment() []byte {
return nil
}
// Labels writes the labels of the current sample into the passed labels.
func (p *ProtobufParser) Labels(l *labels.Labels) {
*l = p.lset.Copy()
}
// Exemplar writes the exemplar of the current sample into the passed
// exemplar. It returns whether an exemplar exists. In the case of a native
// histogram, the exemplars stored in the native histogram are returned;
// if that field is empty, the classic bucket section is used for exemplars
// instead.
// To ingest all exemplars, call the Exemplar method repeatedly until it returns false.
func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
if p.exemplarReturned && p.state == EntrySeries {
// We only ever return one exemplar per (non-native-histogram) series.
return false
}
var exProto *dto.Exemplar
switch p.dec.GetType() {
case dto.MetricType_COUNTER:
exProto = p.dec.GetCounter().GetExemplar()
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
isClassic := p.state == EntrySeries
if !isClassic && len(p.dec.GetHistogram().GetExemplars()) > 0 {
exs := p.dec.GetHistogram().GetExemplars()
for p.exemplarPos < len(exs) {
exProto = exs[p.exemplarPos]
p.exemplarPos++
if exProto != nil && exProto.GetTimestamp() != nil {
break
}
}
if exProto != nil && exProto.GetTimestamp() == nil {
return false
}
} else {
bb := p.dec.GetHistogram().GetBucket()
if p.fieldPos < 0 {
if isClassic {
return false // At _count or _sum.
}
p.fieldPos = 0 // Start at 1st bucket for native histograms.
}
for p.fieldPos < len(bb) {
exProto = bb[p.fieldPos].GetExemplar()
if isClassic {
break
}
p.fieldPos++
// We deliberately drop exemplars with no timestamp only for native histograms.
if exProto != nil && (isClassic || exProto.GetTimestamp() != nil) {
break // Found a classic histogram exemplar or a native histogram exemplar with a timestamp.
}
}
// If the last exemplar for native histograms has no timestamp, ignore it.
if !isClassic && exProto.GetTimestamp() == nil {
return false
}
}
default:
return false
}
if exProto == nil {
return false
}
ex.Value = exProto.GetValue()
if ts := exProto.GetTimestamp(); ts != nil {
ex.HasTs = true
ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
}
p.builder.Reset()
for _, lp := range exProto.GetLabel() {
p.builder.Add(lp.GetName(), lp.GetValue())
}
p.builder.Sort()
ex.Labels = p.builder.Labels()
p.exemplarReturned = true
return true
}
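// Editor's sketch (not part of the original file): draining all exemplars of
// the current sample, as the method comment above prescribes and as the
// benchmarks in this package do:
//
//	var e exemplar.Exemplar
//	for p.Exemplar(&e) {
//		// Process e; the next call overwrites it.
//	}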
// StartTimestamp returns the start timestamp (ST) of counters, summaries,
// and histograms, or 0 if no ST is present.
func (p *ProtobufParser) StartTimestamp() int64 {
var st *types.Timestamp
switch p.dec.GetType() {
case dto.MetricType_COUNTER:
st = p.dec.GetCounter().GetCreatedTimestamp()
case dto.MetricType_SUMMARY:
st = p.dec.GetSummary().GetCreatedTimestamp()
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
st = p.dec.GetHistogram().GetCreatedTimestamp()
default:
}
if st == nil {
return 0
}
// Same as the gogo proto types.TimestampFromProto, but straight to an
// integer and without validation.
return st.GetSeconds()*1e3 + int64(st.GetNanos())/1e6
}
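// Editor's note, a worked example of the conversion above: a created
// timestamp of Seconds: 1700000000 and Nanos: 500000000 yields
// 1700000000*1e3 + 500000000/1e6 = 1700000000500 milliseconds.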
// Next advances the parser to the next "sample" (emulating the behavior of a
// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
// read.
func (p *ProtobufParser) Next() (Entry, error) {
p.exemplarReturned = false
p.nhcbH = nil
p.nhcbFH = nil
switch p.state {
// Invalid state occurs on:
// * First Next() call.
// * Recursive call that tells Next to move to the next metric family.
case EntryInvalid:
p.exemplarPos = 0
p.fieldPos = -2
if err := p.dec.NextMetricFamily(); err != nil {
return p.state, err
}
if err := p.dec.NextMetric(); err != nil {
// Skip empty metric families.
if errors.Is(err, io.EOF) {
return p.Next()
}
return EntryInvalid, err
}
// We are at the beginning of a metric family. Put only the name
// into entryBytes and validate only name, help, and type for now.
name := p.dec.GetName()
if !model.UTF8Validation.IsValidMetricName(name) {
return EntryInvalid, fmt.Errorf("invalid metric name: %s", name)
}
if help := p.dec.GetHelp(); !utf8.ValidString(help) {
return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help)
}
switch p.dec.GetType() {
case dto.MetricType_COUNTER,
dto.MetricType_GAUGE,
dto.MetricType_HISTOGRAM,
dto.MetricType_GAUGE_HISTOGRAM,
dto.MetricType_SUMMARY,
dto.MetricType_UNTYPED:
// All good.
default:
return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.dec.GetType())
}
p.entryBytes.Reset()
p.entryBytes.WriteString(name)
p.state = EntryHelp
case EntryHelp:
if p.dec.Unit != "" {
p.state = EntryUnit
} else {
p.state = EntryType
}
case EntryUnit:
p.state = EntryType
case EntryType:
t := p.dec.GetType()
if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
if p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()) {
p.state = EntrySeries
p.fieldPos = -3 // We have not returned anything, let p.Next() increment it to -2.
return p.Next()
}
p.state = EntryHistogram
} else {
p.state = EntrySeries
}
if err := p.onSeriesOrHistogramUpdate(); err != nil {
return EntryInvalid, err
}
case EntrySeries:
// Potentially a second series in the metric family.
t := p.dec.GetType()
decodeNext := true
if t == dto.MetricType_SUMMARY ||
t == dto.MetricType_HISTOGRAM ||
t == dto.MetricType_GAUGE_HISTOGRAM {
// Non-trivial series (complex metrics, with magic suffixes).
isClassicHistogram := (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
(p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()))
skipSeries := p.convertClassicHistogramsToNHCB && isClassicHistogram && !p.parseClassicHistograms
// Did we iterate over all the classic representation's fields?
// NOTE: p.fieldsDone is updated on p.onSeriesOrHistogramUpdate.
if !p.fieldsDone && !skipSeries {
// Still some fields to iterate over.
p.fieldPos++
if err := p.onSeriesOrHistogramUpdate(); err != nil {
return EntryInvalid, err
}
return p.state, nil
}
// Reset histogram fields.
p.fieldPos = -2
p.fieldsDone = false
p.exemplarPos = 0
// If this is a metric family containing native histograms, it means we
// are here thanks to the redoClassic state. Return to native histograms
// for a consistent flow. If this is a metric family containing classic
// histograms, it means we might need to do NHCB conversion.
if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
if !isClassicHistogram {
p.state = EntryHistogram
} else if p.convertClassicHistogramsToNHCB {
// We still need to spit out the NHCB.
var err error
p.nhcbH, p.nhcbFH, err = p.convertToNHCB(t)
if err != nil {
return EntryInvalid, err
}
p.state = EntryHistogram
// We have an NHCB to emit, no need to decode the next series.
decodeNext = false
}
}
}
// Is there another series?
if decodeNext {
if err := p.dec.NextMetric(); err != nil {
if errors.Is(err, io.EOF) {
p.state = EntryInvalid
return p.Next()
}
return EntryInvalid, err
}
}
if err := p.onSeriesOrHistogramUpdate(); err != nil {
return EntryInvalid, err
}
case EntryHistogram:
switchToClassic := func() (Entry, error) {
p.redoClassic = false
p.fieldPos = -3
p.fieldsDone = false
p.state = EntrySeries
return p.Next() // Switch to classic histogram.
}
// Was Histogram() called and parseClassicHistograms is true?
if p.redoClassic {
return switchToClassic()
}
// Is there another series?
if err := p.dec.NextMetric(); err != nil {
if errors.Is(err, io.EOF) {
p.state = EntryInvalid
return p.Next()
}
return EntryInvalid, err
}
// If this metric is not a native histogram, or we are ignoring
// native histograms, it means we are here thanks to NHCB
// conversion. Return to classic histograms for a consistent
// flow.
if p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()) {
return switchToClassic()
}
if err := p.onSeriesOrHistogramUpdate(); err != nil {
return EntryInvalid, err
}
default:
return EntryInvalid, fmt.Errorf("invalid protobuf parsing state: %d", p.state)
}
return p.state, nil
}
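// Editor's note (derived from the state machine above): a counter metric
// family with help text, no unit, and two series yields the entry sequence
// EntryHelp, EntryType, EntrySeries, EntrySeries; a unit would insert
// EntryUnit between EntryHelp and EntryType.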
// onSeriesOrHistogramUpdate updates internal state before returning
// a series or histogram. It updates:
// * p.lset.
// * p.entryBytes.
// * p.fieldsDone depending on p.fieldPos.
func (p *ProtobufParser) onSeriesOrHistogramUpdate() error {
p.builder.Reset()
if p.enableTypeAndUnitLabels {
_, typ := p.Type()
m := schema.Metadata{
Name: p.getMagicName(),
Type: typ,
Unit: p.dec.GetUnit(),
}
m.AddToLabels(&p.builder)
if err := p.dec.Label(m.NewIgnoreOverriddenMetadataLabelScratchBuilder(&p.builder)); err != nil {
return err
}
} else {
p.builder.Add(labels.MetricName, p.getMagicName())
if err := p.dec.Label(&p.builder); err != nil {
return err
}
}
if needed, name, value := p.getMagicLabel(); needed {
p.builder.Add(name, value)
}
// Sort labels to maintain the sorted labels invariant.
p.builder.Sort()
p.builder.Overwrite(&p.lset)
// entryBytes has to be unique for each series.
p.entryBytes.Reset()
p.lset.Range(func(l labels.Label) {
if l.Name == labels.MetricName {
p.entryBytes.WriteString(l.Value)
return
}
p.entryBytes.WriteByte(model.SeparatorByte)
p.entryBytes.WriteString(l.Name)
p.entryBytes.WriteByte(model.SeparatorByte)
p.entryBytes.WriteString(l.Value)
})
return nil
}
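// Editor's note: the entryBytes layout written above is the metric name
// followed by model.SeparatorByte-delimited label name/value pairs, e.g.
// foo_total{a="b"} becomes "foo_total\xffa\xffb" (assuming type-and-unit
// labels are disabled).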
// getMagicName usually just returns p.dec.GetName() but adds a magic suffix
// ("_count", "_sum", "_bucket") if needed according to the current parser
// state.
func (p *ProtobufParser) getMagicName() string {
t := p.dec.GetType()
if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
return p.dec.GetName()
}
if p.fieldPos == -2 {
return p.dec.GetName() + "_count"
}
if p.fieldPos == -1 {
return p.dec.GetName() + "_sum"
}
if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
return p.dec.GetName() + "_bucket"
}
return p.dec.GetName()
}
// getMagicLabel returns if a magic label ("quantile" or "le") is needed and, if
// so, its name and value. It also sets p.fieldsDone if applicable.
func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
// Native histogram or _count and _sum series.
if p.state == EntryHistogram || p.fieldPos < 0 {
return false, "", ""
}
switch p.dec.GetType() {
case dto.MetricType_SUMMARY:
qq := p.dec.GetSummary().GetQuantile()
q := qq[p.fieldPos]
p.fieldsDone = p.fieldPos == len(qq)-1
return true, model.QuantileLabel, labels.FormatOpenMetricsFloat(q.GetQuantile())
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
bb := p.dec.GetHistogram().GetBucket()
if p.fieldPos >= len(bb) {
p.fieldsDone = true
return true, model.BucketLabel, "+Inf"
}
b := bb[p.fieldPos]
p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
return true, model.BucketLabel, labels.FormatOpenMetricsFloat(b.GetUpperBound())
}
return false, "", ""
}
// isNativeHistogram returns false iff the provided histogram has no spans at
// all (neither positive nor negative) and a zero threshold of 0 and a zero
// count of 0. In principle, this could still be meant to be a native histogram
// with a zero threshold of 0 and no observations yet. In that case,
// instrumentation libraries should add a "no-op" span (e.g. length zero, offset
// zero) to signal that the histogram is meant to be parsed as a native
// histogram. Failing to do so will cause Prometheus to parse it as a classic
// histogram as long as no observations have happened.
func isNativeHistogram(h *dto.Histogram) bool {
return len(h.GetPositiveSpan()) > 0 ||
len(h.GetNegativeSpan()) > 0 ||
h.GetZeroThreshold() > 0 ||
h.GetZeroCount() > 0
}
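// Editor's note illustrating the comment above: a histogram whose only span
// is the no-op {Offset: 0, Length: 0} still counts as native even with zero
// observations, whereas one carrying only classic buckets (no spans, zero
// threshold 0, zero count 0) does not.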
func (p *ProtobufParser) convertToNHCB(t dto.MetricType) (*histogram.Histogram, *histogram.FloatHistogram, error) {
h := p.dec.GetHistogram()
p.tmpNHCB.Reset()
// TODO(krajorama): convertnhcb should support setting integer mode up
// front since we know it here. That would avoid the converter having
// to guess it based on counts.
v := h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount())
}
if err := p.tmpNHCB.SetCount(v); err != nil {
return nil, nil, err
}
if err := p.tmpNHCB.SetSum(h.GetSampleSum()); err != nil {
return nil, nil, err
}
for _, b := range h.GetBucket() {
v := b.GetCumulativeCountFloat()
if v == 0 {
v = float64(b.GetCumulativeCount())
}
if err := p.tmpNHCB.SetBucketCount(b.GetUpperBound(), v); err != nil {
return nil, nil, err
}
}
ch, cfh, err := p.tmpNHCB.Convert()
if err != nil {
return nil, nil, err
}
if t == dto.MetricType_GAUGE_HISTOGRAM {
if ch != nil {
ch.CounterResetHint = histogram.GaugeType
} else {
cfh.CounterResetHint = histogram.GaugeType
}
}
return ch, cfh, nil
}
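// Editor's sketch (not part of the original file): the same convertnhcb API
// as used above, in isolation, turning classic cumulative buckets into an
// NHCB. The bucket values are illustrative.
//
//	t := convertnhcb.NewTempHistogram()
//	t.Reset()
//	_ = t.SetCount(4)
//	_ = t.SetSum(3.5)
//	_ = t.SetBucketCount(0.1, 1)         // le="0.1"
//	_ = t.SetBucketCount(1, 3)           // le="1"
//	_ = t.SetBucketCount(math.Inf(1), 4) // le="+Inf"
//	nhcbH, nhcbFH, err := t.Convert()    // On success, exactly one of nhcbH/nhcbFH is non-nil.
//	_, _, _ = nhcbH, nhcbFH, err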

// ---------------------------------------------------------------------------
// model/textparse/openmetricsparse_test.go
// https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/textparse/openmetricsparse_test.go
// ---------------------------------------------------------------------------

// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"fmt"
"io"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
)
func int64p(x int64) *int64 { return &x }
func TestOpenMetricsParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
# UNIT go_gc_duration_seconds seconds
go_gc_duration_seconds{quantile="0"} 4.9351e-05
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
# HELP nohelp1
# HELP help2 escape \ \n \\ \" \x chars
# UNIT nounit
go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05
go_gc_duration_seconds_count 99
some:aggregate:rate5m{a_b="c"} 1
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 33 123.123
# TYPE hh histogram
hh_bucket{le="+Inf"} 1
# TYPE gh gaugehistogram
gh_bucket{le="+Inf"} 1
# TYPE hhh histogram
hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4
hhh_count 1 # {id="histogram-count-test"} 4
# TYPE ggh gaugehistogram
ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123
ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123
# TYPE smr_seconds summary
smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321
smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321
# TYPE ii info
ii{foo="bar"} 1
# TYPE ss stateset
ss{ss="foo"} 1
ss{ss="bar"} 0
ss{A="a"} 0
# TYPE un unknown
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
# HELP foo Counter with and without labels to certify ST is parsed for both cases
# TYPE foo counter
foo_total 17.0 1520879607.789 # {id="counter-test"} 5
foo_created 1520872607.123
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1520872607.123
foo_total{le="c"} 21.0
foo_created{le="c"} 1520872621.123
foo_total{le="1"} 10.0
# HELP bar Summary with ST at the end, making sure we find ST even if it's multiple lines a far
# TYPE bar summary
bar_count 17.0
bar_sum 324789.3
bar{quantile="0.95"} 123.7
bar{quantile="0.99"} 150.0
bar_created 1520872608.124
# HELP baz Histogram with the same objective as above's summary
# TYPE baz histogram
baz_bucket{le="0.0"} 0
baz_bucket{le="+Inf"} 17
baz_count 17
baz_sum 324789.3
baz_created 1520872609.125
# HELP fizz_created Gauge which shouldn't be parsed as ST
# TYPE fizz_created gauge
fizz_created 17.0
# HELP something Histogram with _created between buckets and summary
# TYPE something histogram
something_count 18
something_sum 324789.4
something_created 1520430001
something_bucket{le="0.0"} 1
something_bucket{le="1"} 2
something_bucket{le="+Inf"} 18
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 20
yum_sum 324789.5
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_count 21
foobar_created 1520430004
foobar_sum 324789.6
foobar{quantile="0.95"} 123.8
foobar{quantile="0.99"} 150.1`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
for _, typeAndUnitEnabled := range []bool{false, true} {
t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) {
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go_gc_duration_seconds",
unit: "seconds",
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.0"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.25"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.5", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
),
}, {
m: "nohelp1",
help: "",
}, {
m: "help2",
help: "escape \\ \n \\ \" \\x chars",
}, {
m: "nounit",
unit: "",
}, {
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "1.0", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeSummary),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "go_goroutines"),
),
}, {
m: "hh",
typ: model.MetricTypeHistogram,
}, {
m: `hh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
),
}, {
m: "gh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `gh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "gh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
),
}, {
m: "hhh",
typ: model.MetricTypeHistogram,
}, {
m: `hhh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hhh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
},
}, {
m: `hhh_count`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hhh_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "hhh_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
},
}, {
m: "ggh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ggh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: `ggh_count`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ggh_count", "__type__", string(model.MetricTypeGaugeHistogram)),
labels.FromStrings("__name__", "ggh_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: "smr_seconds",
typ: model.MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "smr_seconds_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "smr_seconds_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "smr_seconds_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "smr_seconds_sum"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: "ii",
typ: model.MetricTypeInfo,
}, {
m: `ii{foo="bar"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ii", "__type__", string(model.MetricTypeInfo), "foo", "bar"),
labels.FromStrings("__name__", "ii", "foo", "bar"),
),
}, {
m: "ss",
typ: model.MetricTypeStateset,
}, {
m: `ss{ss="foo"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "foo"),
labels.FromStrings("__name__", "ss", "ss", "foo"),
),
}, {
m: `ss{ss="bar"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "bar"),
labels.FromStrings("__name__", "ss", "ss", "bar"),
),
}, {
m: `ss{A="a"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "A", "a"),
labels.FromStrings("__name__", "ss", "A", "a"),
),
}, {
m: "un",
typ: model.MetricTypeUnknown,
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify ST is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
}, {
m: "foo_total",
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter)),
labels.FromStrings("__name__", "foo_total"),
),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
st: 1520872607123,
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "a", "b"),
labels.FromStrings("__name__", "foo_total", "a", "b"),
),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
st: 1520872607123,
}, {
m: `foo_total{le="c"}`,
v: 21.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "c"),
labels.FromStrings("__name__", "foo_total", "le", "c"),
),
st: 1520872621123,
}, {
m: `foo_total{le="1"}`,
v: 10.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "1"),
labels.FromStrings("__name__", "foo_total", "le", "1"),
),
}, {
m: "bar",
help: "Summary with ST at the end, making sure we find ST even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "bar_count"),
),
st: 1520872608124,
}, {
m: "bar_sum",
v: 324789.3,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "bar_sum"),
),
st: 1520872608124,
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "bar", "quantile", "0.95"),
),
st: 1520872608124,
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "bar", "quantile", "0.99"),
),
st: 1520872608124,
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"),
labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
),
st: 1520872609125,
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
),
st: 1520872609125,
}, {
m: `baz_count`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "baz_count"),
),
st: 1520872609125,
}, {
m: `baz_sum`,
v: 324789.3,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_sum", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "baz_sum"),
),
st: 1520872609125,
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as ST",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "fizz_created", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "fizz_created"),
),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something_count`,
v: 18,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "something_count"),
),
st: 1520430001000,
}, {
m: `something_sum`,
v: 324789.4,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_sum", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "something_sum"),
),
st: 1520430001000,
}, {
m: `something_bucket{le="0.0"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"),
labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
),
st: 1520430001000,
}, {
m: `something_bucket{le="1"}`,
v: 2,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "1.0"),
labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
),
st: 1520430001000,
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "something_bucket", "le", "+Inf"),
),
st: 1520430001000,
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "yum_count"),
),
st: 1520430003000,
}, {
m: `yum_sum`,
v: 324789.5,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "yum_sum"),
),
st: 1520430003000,
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "yum", "quantile", "0.95"),
),
st: 1520430003000,
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "yum", "quantile", "0.99"),
),
st: 1520430003000,
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "foobar_count"),
),
st: 1520430004000,
}, {
m: `foobar_sum`,
v: 324789.6,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "foobar_sum"),
),
st: 1520430004000,
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
),
st: 1520430004000,
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
),
st: 1520430004000,
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeSummary),
},
}
opts := []OpenMetricsOption{WithOMParserSTSeriesSkipped()}
if typeAndUnitEnabled {
opts = append(opts, WithOMParserTypeAndUnitLabels())
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), opts...)
got := testParse(t, p)
requireEntries(t, exp, got)
})
}
}
func TestOpenMetricsParse_UTF8(t *testing.T) {
input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations.
# TYPE "go.gc_duration_seconds" summary
# UNIT "go.gc_duration_seconds" seconds
{"go.gc_duration_seconds",quantile="0"} 4.9351e-05
{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
{"go.gc_duration_seconds_created"} 1520872607.123
{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
{q="0.9","http.status",a="b"} 8.3835e-05
{"go.gc_duration_seconds_sum"} 0.004304266
{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0
quotedexemplar_count 1 # {"id.thing"="histogram-count-test"} 4
quotedexemplar2_count 1 # {"id.thing"="histogram-count-test",other="hello"} 4
`
input += "# EOF\n"
exp := []parsedEntry{
{
m: "go.gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go.gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go.gc_duration_seconds",
unit: "seconds",
}, {
m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"),
st: 1520872607123,
}, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
st: 1520872607123,
}, {
m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.5", "a", "b"),
}, {
m: `{"http.status",q="0.9",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "http.status", "q", "0.9", "a", "b"),
}, {
m: `{"http.status",q="0.9",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "http.status", "q", "0.9", "a", "b"),
}, {
m: `{q="0.9","http.status",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "http.status", "q", "0.9", "a", "b"),
}, {
m: `{"go.gc_duration_seconds_sum"}`,
v: 0.004304266,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds_sum"),
}, {
m: `{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"}`,
v: 10.0,
lset: labels.FromStrings("__name__", `Heizölrückstoßabdämpfung 10€ metric with "interesting" {character
choices}`, "strange©™\n'quoted' \"name\"", "6"),
}, {
m: `quotedexemplar_count`,
v: 1,
lset: labels.FromStrings("__name__", "quotedexemplar_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id.thing", "histogram-count-test"), Value: 4},
},
}, {
m: `quotedexemplar2_count`,
v: 1,
lset: labels.FromStrings("__name__", "quotedexemplar2_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id.thing", "histogram-count-test", "other", "hello"), Value: 4},
},
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserSTSeriesSkipped())
got := testParse(t, p)
requireEntries(t, exp, got)
}
func TestOpenMetricsParseErrors(t *testing.T) {
cases := []struct {
input string
err string
}{
// Happy cases. EOF is returned by the parser at the end of valid
// data.
{
input: "# EOF",
err: "EOF",
},
{
input: "# EOF\n",
err: "EOF",
},
// Unhappy cases.
{
input: "",
err: "data does not end with # EOF",
},
{
input: "\n",
err: "expected a valid start token, got \"\\n\" (\"INVALID\") while parsing: \"\\n\"",
},
{
input: "metric",
err: "expected value after metric, got \"metric\" (\"EOF\") while parsing: \"metric\"",
},
{
input: "metric 1",
err: "data does not end with # EOF",
},
{
input: "metric 1\n",
err: "data does not end with # EOF",
},
{
input: "metric_total 1 # {aa=\"bb\"} 4",
err: "data does not end with # EOF",
},
{
input: "a\n#EOF\n",
err: "expected value after metric, got \"\\n\" (\"INVALID\") while parsing: \"a\\n\"",
},
{
input: "\n\n#EOF\n",
err: "expected a valid start token, got \"\\n\" (\"INVALID\") while parsing: \"\\n\"",
},
{
input: " a 1\n#EOF\n",
err: "expected a valid start token, got \" \" (\"INVALID\") while parsing: \" \"",
},
{
input: "9\n#EOF\n",
err: "expected a valid start token, got \"9\" (\"INVALID\") while parsing: \"9\"",
},
{
input: "# TYPE u untyped\n#EOF\n",
err: "invalid metric type \"untyped\"",
},
{
input: "# TYPE c counter \n#EOF\n",
err: "invalid metric type \"counter \"",
},
{
input: "# TYPE c counter\n#EOF\n",
err: "expected a valid start token, got \"# \" (\"INVALID\") while parsing: \"# \"",
},
{
input: "# TYPE \n#EOF\n",
err: "expected metric name after TYPE, got \"\\n\" (\"INVALID\") while parsing: \"# TYPE \\n\"",
},
{
input: "# TYPE m\n#EOF\n",
err: "expected text in TYPE",
},
{
input: "# UNIT metric suffix\n#EOF\n",
err: "unit \"suffix\" not a suffix of metric \"metric\"",
},
{
input: "# UNIT metricsuffix suffix\n#EOF\n",
err: "unit \"suffix\" not a suffix of metric \"metricsuffix\"",
},
{
input: "# UNIT m suffix\n#EOF\n",
err: "unit \"suffix\" not a suffix of metric \"m\"",
},
{
input: "# UNIT \n#EOF\n",
err: "expected metric name after UNIT, got \"\\n\" (\"INVALID\") while parsing: \"# UNIT \\n\"",
},
{
input: "# UNIT m\n#EOF\n",
err: "expected text in UNIT",
},
{
input: "# HELP \n#EOF\n",
err: "expected metric name after HELP, got \"\\n\" (\"INVALID\") while parsing: \"# HELP \\n\"",
},
{
input: "# HELP m\n#EOF\n",
err: "expected text in HELP",
},
{
input: "a\t1\n#EOF\n",
err: "expected value after metric, got \"\\t\" (\"INVALID\") while parsing: \"a\\t\"",
},
{
input: "a 1\t2\n#EOF\n",
err: "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax while parsing: \"a 1\\t2\"",
},
{
input: "a 1 2 \n#EOF\n",
err: "expected next entry after timestamp, got \" \\n\" (\"INVALID\") while parsing: \"a 1 2 \\n\"",
},
{
input: "a 1 2 #\n#EOF\n",
err: "expected next entry after timestamp, got \" #\\n\" (\"TIMESTAMP\") while parsing: \"a 1 2 #\\n\"",
},
{
input: "a 1 1z\n#EOF\n",
err: "strconv.ParseFloat: parsing \"1z\": invalid syntax while parsing: \"a 1 1z\"",
},
{
input: " # EOF\n",
err: "expected a valid start token, got \" \" (\"INVALID\") while parsing: \" \"",
},
{
input: "# EOF\na 1",
err: "unexpected data after # EOF",
},
{
input: "# EOF\n\n",
err: "unexpected data after # EOF",
},
{
input: "# EOFa 1",
err: "unexpected data after # EOF",
},
{
input: "#\tTYPE c counter\n",
err: "expected a valid start token, got \"#\\t\" (\"INVALID\") while parsing: \"#\\t\"",
},
{
input: "# TYPE c counter\n",
err: "invalid metric type \" counter\"",
},
{
input: "a 1 1 1\n# EOF\n",
err: "expected next entry after timestamp, got \" 1\\n\" (\"TIMESTAMP\") while parsing: \"a 1 1 1\\n\"",
},
{
input: "a{b='c'} 1\n# EOF\n",
err: "expected label value, got \"'\" (\"INVALID\") while parsing: \"a{b='\"",
},
{
input: "a{,b=\"c\"} 1\n# EOF\n",
err: "expected label name, got \",b\" (\"COMMA\") while parsing: \"a{,b\"",
},
{
input: "a{b=\"c\"d=\"e\"} 1\n# EOF\n",
err: "expected comma or brace close, got \"d=\" (\"LNAME\") while parsing: \"a{b=\\\"c\\\"d=\"",
},
{
input: "a{b=\"c\",,d=\"e\"} 1\n# EOF\n",
err: "expected label name, got \",d\" (\"COMMA\") while parsing: \"a{b=\\\"c\\\",,d\"",
},
{
input: "a{b=\n# EOF\n",
err: "expected label value, got \"\\n\" (\"INVALID\") while parsing: \"a{b=\\n\"",
},
{
input: "a{\xff=\"foo\"} 1\n# EOF\n",
err: "expected label name, got \"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"",
},
{
input: "a{b=\"\xff\"} 1\n# EOF\n",
err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"",
},
{
input: `{"a","b = "c"}
# EOF
`,
err: "expected equal, got \"c\\\"\" (\"LNAME\") while parsing: \"{\\\"a\\\",\\\"b = \\\"c\\\"\"",
},
{
input: `{"a",b\nc="d"} 1
# EOF
`,
err: "expected equal, got \"\\\\\" (\"INVALID\") while parsing: \"{\\\"a\\\",b\\\\\"",
},
{
input: "a true\n",
err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"",
},
{
input: "something_weird{problem=\"\n# EOF\n",
err: "expected label value, got \"\\\"\\n\" (\"INVALID\") while parsing: \"something_weird{problem=\\\"\\n\"",
},
{
input: "empty_label_name{=\"\"} 0\n# EOF\n",
err: "expected label name, got \"=\\\"\" (\"EQUAL\") while parsing: \"empty_label_name{=\\\"\"",
},
{
input: "foo 1_2\n\n# EOF\n",
err: "unsupported character in float while parsing: \"foo 1_2\"",
},
{
input: "foo 0x1p-3\n\n# EOF\n",
err: "unsupported character in float while parsing: \"foo 0x1p-3\"",
},
{
input: "foo 0x1P-3\n\n# EOF\n",
err: "unsupported character in float while parsing: \"foo 0x1P-3\"",
},
{
input: "foo 0 1_2\n\n# EOF\n",
err: "unsupported character in float while parsing: \"foo 0 1_2\"",
},
{
input: "custom_metric_total 1 # {aa=bb}\n# EOF\n",
err: "expected label value, got \"b\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=b\"",
},
{
input: "custom_metric_total 1 # {aa=\"bb\"}\n# EOF\n",
err: "expected value after exemplar labels, got \"\\n\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\\n\"",
},
{
input: `custom_metric_total 1 # {aa="bb"}`,
err: "expected value after exemplar labels, got \"}\" (\"EOF\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\"",
},
{
input: `custom_metric_total 1 # {bb}`,
err: "expected label name, got \"}\" (\"BCLOSE\") while parsing: \"custom_metric_total 1 # {bb}\"",
},
{
input: `custom_metric_total 1 # {bb, a="dd"}`,
err: "expected label name, got \", \" (\"COMMA\") while parsing: \"custom_metric_total 1 # {bb, \"",
},
{
input: `custom_metric_total 1 # {aa="bb",,cc="dd"} 1`,
err: "expected label name, got \",c\" (\"COMMA\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\",,c\"",
},
{
input: `custom_metric_total 1 # {aa="bb"} 1_2`,
err: "unsupported character in float while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} 1_2\"",
},
{
input: `custom_metric_total 1 # {aa="bb"} 0x1p-3`,
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/textparse/interface_test.go | model/textparse/interface_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"errors"
"io"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/testutil"
)
func TestNewParser(t *testing.T) {
t.Parallel()
requireNilParser := func(t *testing.T, p Parser) {
require.Nil(t, p)
}
requirePromParser := func(t *testing.T, p Parser) {
require.NotNil(t, p)
_, ok := p.(*PromParser)
require.True(t, ok)
}
requireOpenMetricsParser := func(t *testing.T, p Parser) {
require.NotNil(t, p)
_, ok := p.(*OpenMetricsParser)
require.True(t, ok)
}
requireProtobufParser := func(t *testing.T, p Parser) {
require.NotNil(t, p)
_, ok := p.(*ProtobufParser)
require.True(t, ok)
}
for name, tt := range map[string]*struct {
contentType string
fallbackScrapeProtocol config.ScrapeProtocol
validateParser func(*testing.T, Parser)
err string
}{
"empty-string": {
validateParser: requireNilParser,
err: "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target",
},
"empty-string-fallback-text-plain": {
validateParser: requirePromParser,
fallbackScrapeProtocol: config.PrometheusText0_0_4,
err: "non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol \"text/plain\"",
},
"invalid-content-type-1": {
contentType: "invalid/",
validateParser: requireNilParser,
err: "expected token after slash",
},
"invalid-content-type-1-fallback-text-plain": {
contentType: "invalid/",
validateParser: requirePromParser,
fallbackScrapeProtocol: config.PrometheusText0_0_4,
err: "expected token after slash",
},
"invalid-content-type-1-fallback-openmetrics": {
contentType: "invalid/",
validateParser: requireOpenMetricsParser,
fallbackScrapeProtocol: config.OpenMetricsText0_0_1,
err: "expected token after slash",
},
"invalid-content-type-1-fallback-protobuf": {
contentType: "invalid/",
validateParser: requireProtobufParser,
fallbackScrapeProtocol: config.PrometheusProto,
err: "expected token after slash",
},
"invalid-content-type-2": {
contentType: "invalid/invalid/invalid",
validateParser: requireNilParser,
err: "unexpected content after media subtype",
},
"invalid-content-type-2-fallback-text-plain": {
contentType: "invalid/invalid/invalid",
validateParser: requirePromParser,
fallbackScrapeProtocol: config.PrometheusText1_0_0,
err: "unexpected content after media subtype",
},
"invalid-content-type-3": {
contentType: "/",
validateParser: requireNilParser,
err: "no media type",
},
"invalid-content-type-3-fallback-text-plain": {
contentType: "/",
validateParser: requirePromParser,
fallbackScrapeProtocol: config.PrometheusText1_0_0,
err: "no media type",
},
"invalid-content-type-4": {
contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8",
validateParser: requireNilParser,
err: "duplicate parameter name",
},
"invalid-content-type-4-fallback-open-metrics": {
contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8",
validateParser: requireOpenMetricsParser,
fallbackScrapeProtocol: config.OpenMetricsText1_0_0,
err: "duplicate parameter name",
},
"openmetrics": {
contentType: "application/openmetrics-text",
validateParser: requireOpenMetricsParser,
},
"openmetrics-with-charset": {
contentType: "application/openmetrics-text; charset=utf-8",
validateParser: requireOpenMetricsParser,
},
"openmetrics-with-charset-and-version": {
contentType: "application/openmetrics-text; version=1.0.0; charset=utf-8",
validateParser: requireOpenMetricsParser,
},
"plain-text": {
contentType: "text/plain",
validateParser: requirePromParser,
},
"protobuf": {
contentType: "application/vnd.google.protobuf",
validateParser: requireProtobufParser,
},
"plain-text-with-version": {
contentType: "text/plain; version=0.0.4",
validateParser: requirePromParser,
},
"some-other-valid-content-type": {
contentType: "text/html",
validateParser: requireNilParser,
err: "received unsupported Content-Type \"text/html\" and no fallback_scrape_protocol specified for target",
},
"some-other-valid-content-type-fallback-text-plain": {
contentType: "text/html",
validateParser: requirePromParser,
fallbackScrapeProtocol: config.PrometheusText0_0_4,
err: "received unsupported Content-Type \"text/html\", using fallback_scrape_protocol \"text/plain\"",
},
} {
t.Run(name, func(t *testing.T) {
tt := tt // Copy to local variable before going parallel.
t.Parallel()
fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType()
p, err := New([]byte{}, tt.contentType, labels.NewSymbolTable(), ParserOptions{FallbackContentType: fallbackProtoMediaType})
tt.validateParser(t, p)
if tt.err == "" {
require.NoError(t, err)
} else {
require.ErrorContains(t, err, tt.err)
}
})
}
}
// parsedEntry represents data that is parsed for each entry.
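// For an illustrative counter sample such as `foo_total{a="b"} 17`, only m,
// v and lset would be set; the histogram, exemplar and metadata fields stay
// at their zero values.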
type parsedEntry struct {
	// In all entry types except EntryComment and EntryInvalid.
m string
// In EntryHistogram.
shs *histogram.Histogram
fhs *histogram.FloatHistogram
// In EntrySeries.
v float64
// In EntrySeries and EntryHistogram.
lset labels.Labels
t *int64
es []exemplar.Exemplar
st int64
// In EntryType.
typ model.MetricType
// In EntryHelp.
help string
// In EntryUnit.
unit string
// In EntryComment.
comment string
}
func requireEntries(t *testing.T, exp, got []parsedEntry) {
t.Helper()
testutil.RequireEqualWithOptions(t, exp, got, []cmp.Option{
		// We reuse slices, so we sometimes have empty-vs-nil differences
		// that we need to ignore with cmpopts.EquateEmpty().
		// However, we have to filter out labels, as only one comparer
		// can be specified per type, and RequireEqualWithOptions already
		// uses cmp.Comparer(labels.Equal).
cmp.FilterValues(func(x, y any) bool {
_, xIsLabels := x.(labels.Labels)
_, yIsLabels := y.(labels.Labels)
return !xIsLabels && !yIsLabels
}, cmpopts.EquateEmpty()),
cmp.AllowUnexported(parsedEntry{}),
})
}
func testParse(t *testing.T, p Parser) (ret []parsedEntry) {
t.Helper()
for {
et, err := p.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
var got parsedEntry
var m []byte
switch et {
case EntryInvalid:
t.Fatal("entry invalid not expected")
case EntrySeries, EntryHistogram:
var ts *int64
if et == EntrySeries {
m, ts, got.v = p.Series()
} else {
m, ts, got.shs, got.fhs = p.Histogram()
}
if ts != nil {
// TODO(bwplotka): Change to 0 in the interface for set check to
// avoid pointer mangling.
got.t = int64p(*ts)
}
got.m = string(m)
p.Labels(&got.lset)
got.st = p.StartTimestamp()
for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
got.es = append(got.es, e)
}
case EntryType:
m, got.typ = p.Type()
got.m = string(m)
case EntryHelp:
m, h := p.Help()
got.m = string(m)
got.help = string(h)
case EntryUnit:
m, u := p.Unit()
got.m = string(m)
got.unit = string(u)
case EntryComment:
got.comment = string(p.Comment())
}
ret = append(ret, got)
}
return ret
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/textparse/nhcbparse.go | model/textparse/nhcbparse.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"errors"
"io"
"math"
"strconv"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/convertnhcb"
)
type collectionState int
const (
stateStart collectionState = iota
stateCollecting
stateEmitting
stateInhibiting // Inhibiting NHCB, because there was an exponential histogram with the same labels.
)
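// Typical transitions (an illustrative sketch of Next below):
//
//	stateStart      -> stateCollecting on the first classic histogram series
//	stateCollecting -> stateEmitting   once the metric changes or input ends
//	stateEmitting   -> stateStart      after the converted NHCB was returned
//	any state       -> stateInhibiting on a native exponential histogram entry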
// The NHCBParser wraps a Parser and converts classic histograms to native
// histograms with custom buckets.
//
// Since the Parser interface is line based, this parser needs to keep track
// of the last classic histogram series it saw so that it can collate them
// into a single native histogram.
//
// Note:
// - Only series that have the histogram metadata type are considered for
// conversion.
// - The classic series are also returned if keepClassicHistograms is true.
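//
// For example (illustrative), the classic series
//
//	foo_bucket{le="1"} 2
//	foo_bucket{le="+Inf"} 3
//	foo_count 3
//	foo_sum 1.5
//
// are collated into a single native histogram foo with the custom bucket
// boundary 1 and bucket counts [2, 1].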
type NHCBParser struct {
// The parser we're wrapping.
parser Parser
// Option to keep classic histograms along with converted histograms.
keepClassicHistograms bool
// Labels builder.
builder labels.ScratchBuilder
// State of the parser.
state collectionState
// Caches the values from the underlying parser.
// For Series and Histogram.
bytes []byte
ts *int64
value float64
h *histogram.Histogram
fh *histogram.FloatHistogram
// For Metric.
lset labels.Labels
// For Type.
bName []byte
typ model.MetricType
// Caches the entry itself if we are inserting a converted NHCB
// halfway through.
entry Entry
err error
// Caches the values and metric for the inserted converted NHCB.
bytesNHCB []byte
hNHCB *histogram.Histogram
fhNHCB *histogram.FloatHistogram
lsetNHCB labels.Labels
exemplars []exemplar.Exemplar
stNHCB int64
metricStringNHCB string
// Collates values from the classic histogram series to build
// the converted histogram later.
tempLsetNHCB labels.Labels
tempNHCB convertnhcb.TempHistogram
tempExemplars []exemplar.Exemplar
tempExemplarCount int
tempST int64
// Remembers the last base histogram metric name (assuming it's
// a classic histogram) so we can tell if the next float series
// is part of the same classic histogram.
lastHistogramName string
lastHistogramLabelsHash uint64
// Reused buffer for hashing labels.
hBuffer []byte
}
func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser {
return &NHCBParser{
parser: p,
keepClassicHistograms: keepClassicHistograms,
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
tempNHCB: convertnhcb.NewTempHistogram(),
}
}
func (p *NHCBParser) Series() ([]byte, *int64, float64) {
return p.bytes, p.ts, p.value
}
func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
if p.state == stateEmitting {
return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB
}
return p.bytes, p.ts, p.h, p.fh
}
func (p *NHCBParser) Help() ([]byte, []byte) {
return p.parser.Help()
}
func (p *NHCBParser) Type() ([]byte, model.MetricType) {
return p.bName, p.typ
}
func (p *NHCBParser) Unit() ([]byte, []byte) {
return p.parser.Unit()
}
func (p *NHCBParser) Comment() []byte {
return p.parser.Comment()
}
func (p *NHCBParser) Labels(l *labels.Labels) {
if p.state == stateEmitting {
*l = p.lsetNHCB
return
}
*l = p.lset
}
func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool {
if p.state == stateEmitting {
if len(p.exemplars) == 0 {
return false
}
*ex = p.exemplars[0]
p.exemplars = p.exemplars[1:]
return true
}
return p.parser.Exemplar(ex)
}
func (p *NHCBParser) StartTimestamp() int64 {
switch p.state {
case stateStart, stateInhibiting:
if p.entry == EntrySeries || p.entry == EntryHistogram {
return p.parser.StartTimestamp()
}
case stateCollecting:
return p.tempST
case stateEmitting:
return p.stNHCB
}
return 0
}
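// Next advances the wrapped parser, intercepting classic histogram series to
// collate them and, once a complete classic histogram has been collected,
// emitting the converted NHCB as an extra EntryHistogram before resuming.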
func (p *NHCBParser) Next() (Entry, error) {
for {
if p.state == stateEmitting {
p.state = stateStart
if p.entry == EntrySeries {
isNHCB := p.handleClassicHistogramSeries(p.lset)
if isNHCB && !p.keepClassicHistograms {
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
continue
}
}
return p.entry, p.err
}
p.entry, p.err = p.parser.Next()
if p.err != nil {
if errors.Is(p.err, io.EOF) && p.processNHCB() {
return EntryHistogram, nil
}
return EntryInvalid, p.err
}
switch p.entry {
case EntrySeries:
p.bytes, p.ts, p.value = p.parser.Series()
p.parser.Labels(&p.lset)
var isNHCB bool
switch p.state {
case stateCollecting:
if p.differentMetric() && p.processNHCB() {
// We are collecting classic series, but the next series
// has different type or labels. If we can convert what
// we have collected so far to NHCB, then we can return it.
return EntryHistogram, nil
}
isNHCB = p.handleClassicHistogramSeries(p.lset)
case stateInhibiting:
if p.differentMetric() {
// Next has different labels than the previous exponential
// histogram so we can start collecting classic histogram
// series.
p.state = stateStart
isNHCB = p.handleClassicHistogramSeries(p.lset)
} else {
// Next has the same labels as the previous exponential
// histogram, so we are still in the inhibiting state and
// we should not convert to NHCB.
isNHCB = false
}
case stateStart:
isNHCB = p.handleClassicHistogramSeries(p.lset)
default:
// This should not happen.
return EntryInvalid, errors.New("unexpected state in NHCBParser")
}
if isNHCB && !p.keepClassicHistograms {
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
continue
}
return p.entry, p.err
case EntryHistogram:
p.state = stateInhibiting
p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
p.parser.Labels(&p.lset)
p.storeExponentialLabels()
case EntryType:
p.bName, p.typ = p.parser.Type()
}
if p.processNHCB() {
return EntryHistogram, nil
}
return p.entry, p.err
}
}
// differentMetric returns true if the labels have changed and we should emit the NHCB.
func (p *NHCBParser) differentMetric() bool {
if p.typ != model.MetricTypeHistogram {
// Different metric type.
return true
}
_, name := convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName))
if p.lastHistogramName != name {
// Different metric name.
return true
}
nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
// Different label values.
return p.lastHistogramLabelsHash != nextHash
}
// storeClassicLabels saves the label set of the classic histogram, without the name suffix and the bucket `le` label.
func (p *NHCBParser) storeClassicLabels(name string) {
p.lastHistogramName = name
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
}
func (p *NHCBParser) storeExponentialLabels() {
p.lastHistogramName = p.lset.Get(labels.MetricName)
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer)
}
// handleClassicHistogramSeries collates a classic histogram series so it can be
// converted to NHCB, provided it actually is a classic histogram series (and not
// a normal float series) and there isn't already a native histogram with the same
// name (assuming the native histogram is always processed right before the classic
// series). It returns true if the series was collated.
func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool {
if p.typ != model.MetricTypeHistogram {
return false
}
mName := lset.Get(labels.MetricName)
// Sanity check to ensure that the TYPE metadata entry name is the same as the base name.
suffixType, name := convertnhcb.GetHistogramMetricBaseName(mName)
if name != string(p.bName) {
return false
}
switch suffixType {
case convertnhcb.SuffixBucket:
if !lset.Has(labels.BucketLabel) {
// This should not really happen.
return false
}
le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64)
if err == nil && !math.IsNaN(le) {
p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) {
_ = hist.SetBucketCount(le, p.value)
})
return true
}
case convertnhcb.SuffixCount:
p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) {
_ = hist.SetCount(p.value)
})
return true
case convertnhcb.SuffixSum:
p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) {
_ = hist.SetSum(p.value)
})
return true
}
return false
}
func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, name string, updateHist func(*convertnhcb.TempHistogram)) {
if p.state != stateCollecting {
p.storeClassicLabels(name)
p.tempST = p.parser.StartTimestamp()
p.state = stateCollecting
p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, name)
}
p.storeExemplars()
updateHist(&p.tempNHCB)
}
func (p *NHCBParser) storeExemplars() {
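	// Fill the next free slot until the underlying parser runs out of
	// exemplars. tempExemplarCount tracks how many slots were actually
	// filled, so the last, unfilled slot can be reused on the next call.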
for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() {
p.tempExemplarCount++
}
}
func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar {
switch {
case p.tempExemplarCount == len(p.tempExemplars)-1:
		// Reuse the previously allocated exemplar, which was not filled up.
case len(p.tempExemplars) == cap(p.tempExemplars):
// Let the runtime grow the slice.
p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{})
default:
// Take the next element into use.
p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1]
}
return &p.tempExemplars[len(p.tempExemplars)-1]
}
func (p *NHCBParser) swapExemplars() {
p.exemplars = p.tempExemplars[:p.tempExemplarCount]
p.tempExemplars = p.tempExemplars[:0]
}
// processNHCB converts the collated classic histogram series to NHCB and caches the info
// to be returned to callers. Returns true if the conversion was successful.
func (p *NHCBParser) processNHCB() bool {
if p.state != stateCollecting {
return false
}
h, fh, err := p.tempNHCB.Convert()
if err == nil {
if h != nil {
if err := h.Validate(); err != nil {
return false
}
p.hNHCB = h
p.fhNHCB = nil
} else if fh != nil {
if err := fh.Validate(); err != nil {
return false
}
p.hNHCB = nil
p.fhNHCB = fh
}
lblsWithMetricName := p.tempLsetNHCB.DropReserved(func(n string) bool { return n == labels.MetricName })
		// Ensure we return `metric` instead of `metric{}` for name-only
// series, for consistency with wrapped parsers.
if lblsWithMetricName.IsEmpty() {
p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName)
} else {
p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + lblsWithMetricName.StringNoSpace()
}
p.bytesNHCB = []byte(p.metricStringNHCB)
p.lsetNHCB = p.tempLsetNHCB
p.swapExemplars()
p.stNHCB = p.tempST
p.state = stateEmitting
} else {
p.state = stateStart
}
p.tempNHCB.Reset()
p.tempExemplarCount = 0
p.tempST = 0
return err == nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/value/value.go | model/value/value.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package value
import (
"math"
)
const (
// NormalNaN is a quiet NaN. This is also math.NaN().
NormalNaN uint64 = 0x7ff8000000000001
// StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0.
// This value is chosen with many leading 0s, so we have scope to store more
// complicated values in the future. It is 2 rather than 1 to make
	// it easier for a human to distinguish from NormalNaN when debugging.
StaleNaN uint64 = 0x7ff0000000000002
)
// IsStaleNaN returns true when the provided NaN value is a stale marker.
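// Illustrative usage:
//
//	stale := math.Float64frombits(StaleNaN)
//	IsStaleNaN(stale)      // true
//	IsStaleNaN(math.NaN()) // false, math.NaN() is NormalNaN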
func IsStaleNaN(v float64) bool {
return math.Float64bits(v) == StaleNaN
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/exemplar/exemplar.go | model/exemplar/exemplar.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar
import "github.com/prometheus/prometheus/model/labels"
// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
// UTF-8 characters."
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars
const ExemplarMaxLabelSetLength = 128
// Exemplar is additional information associated with a time series.
type Exemplar struct {
Labels labels.Labels `json:"labels"`
Value float64 `json:"value"`
Ts int64 `json:"timestamp"`
HasTs bool
}
type QueryResult struct {
SeriesLabels labels.Labels `json:"seriesLabels"`
Exemplars []Exemplar `json:"exemplars"`
}
// Equals reports whether the exemplar e is the same as e2. Note that if HasTs is false for
// both exemplars then the timestamps will be ignored for the comparison. This can come up
// when an exemplar is exported without its own timestamp, in which case the scrape timestamp
// is assigned to the Ts field. However, we still want to treat the same exemplar, scraped without
// an exported timestamp, as a duplicate of itself for each subsequent scrape.
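// For example, two scrapes of the same exported exemplar (HasTs false for both,
// Ts holding different scrape timestamps) still compare as equal, while any
// difference in Value or Labels makes them unequal.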
func (e Exemplar) Equals(e2 Exemplar) bool {
if !labels.Equal(e.Labels, e2.Labels) {
return false
}
if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {
return false
}
return e.Value == e2.Value
}
// Compare orders exemplars first by timestamp, then by value, then by labels.
func Compare(a, b Exemplar) int {
if a.Ts < b.Ts {
return -1
} else if a.Ts > b.Ts {
return 1
}
if a.Value < b.Value {
return -1
} else if a.Value > b.Value {
return 1
}
return labels.Compare(a.Labels, b.Labels)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/rulefmt/rulefmt.go | model/rulefmt/rulefmt.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rulefmt
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/prometheus/common/model"
"gopkg.in/yaml.v3"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/util/namevalidationutil"
)
// Error represents semantic errors on parsing rule groups.
type Error struct {
Group string
Rule int
RuleName string
Err WrappedError
}
// Error prints the error message in a formatted string.
func (err *Error) Error() string {
if err.Err.err == nil {
return ""
}
if err.Err.nodeAlt != nil {
return fmt.Sprintf("%d:%d: %d:%d: group %q, rule %d, %q: %v", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName, err.Err.err)
}
if err.Err.node != nil {
return fmt.Sprintf("%d:%d: group %q, rule %d, %q: %v", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName, err.Err.err)
}
return fmt.Sprintf("group %q, rule %d, %q: %v", err.Group, err.Rule, err.RuleName, err.Err.err)
}
// Unwrap unpacks wrapped error for use in errors.Is & errors.As.
func (err *Error) Unwrap() error {
return &err.Err
}
// WrappedError wraps error with the yaml node which can be used to represent
// the line and column numbers of the error.
type WrappedError struct {
err error
node *yaml.Node
nodeAlt *yaml.Node
}
// Error prints the error message in a formatted string.
func (we *WrappedError) Error() string {
if we.err == nil {
return ""
}
if we.nodeAlt != nil {
return fmt.Sprintf("%d:%d: %d:%d: %v", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column, we.err)
}
if we.node != nil {
return fmt.Sprintf("%d:%d: %v", we.node.Line, we.node.Column, we.err)
}
return we.err.Error()
}
// Unwrap unpacks wrapped error for use in errors.Is & errors.As.
func (we *WrappedError) Unwrap() error {
return we.err
}
// RuleGroups is a set of rule groups that are typically exposed in a file.
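// For example (illustrative), the following YAML unmarshals into a single
// group holding one recording rule:
//
//	groups:
//	  - name: example
//	    interval: 30s
//	    rules:
//	      - record: job:up:sum
//	        expr: sum by (job) (up)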
type RuleGroups struct {
Groups []RuleGroup `yaml:"groups"`
}
type ruleGroups struct {
Groups []RuleGroupNode `yaml:"groups"`
}
// Validate validates all rules in the rule groups.
func (g *RuleGroups) Validate(node ruleGroups, nameValidationScheme model.ValidationScheme) (errs []error) {
if err := namevalidationutil.CheckNameValidationScheme(nameValidationScheme); err != nil {
errs = append(errs, err)
return errs
}
set := map[string]struct{}{}
for j, g := range g.Groups {
if g.Name == "" {
errs = append(errs, fmt.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column))
}
if _, ok := set[g.Name]; ok {
errs = append(
errs,
fmt.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name),
)
}
for k, v := range g.Labels {
if !nameValidationScheme.IsValidLabelName(k) || k == model.MetricNameLabel {
errs = append(
errs, fmt.Errorf("invalid label name: %s", k),
)
}
if !model.LabelValue(v).IsValid() {
errs = append(
errs, fmt.Errorf("invalid label value: %s", v),
)
}
}
set[g.Name] = struct{}{}
for i, r := range g.Rules {
for _, node := range r.Validate(node.Groups[j].Rules[i], nameValidationScheme) {
var ruleName string
if r.Alert != "" {
ruleName = r.Alert
} else {
ruleName = r.Record
}
errs = append(errs, &Error{
Group: g.Name,
Rule: i + 1,
RuleName: ruleName,
Err: node,
})
}
}
}
return errs
}
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
type RuleGroup struct {
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []Rule `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
}
// RuleGroupNode adds yaml.v3 layer to support line and columns outputs for invalid rule groups.
type RuleGroupNode struct {
yaml.Node
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
}
// Rule describes an alerting or recording rule.
type Rule struct {
Record string `yaml:"record,omitempty"`
Alert string `yaml:"alert,omitempty"`
Expr string `yaml:"expr"`
For model.Duration `yaml:"for,omitempty"`
KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"`
Labels map[string]string `yaml:"labels,omitempty"`
Annotations map[string]string `yaml:"annotations,omitempty"`
}
// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules.
type RuleNode struct {
Record yaml.Node `yaml:"record,omitempty"`
Alert yaml.Node `yaml:"alert,omitempty"`
Expr yaml.Node `yaml:"expr"`
For model.Duration `yaml:"for,omitempty"`
KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"`
Labels map[string]string `yaml:"labels,omitempty"`
Annotations map[string]string `yaml:"annotations,omitempty"`
}
// Validate the rule and return a list of encountered errors.
func (r *Rule) Validate(node RuleNode, nameValidationScheme model.ValidationScheme) (nodes []WrappedError) {
if r.Record != "" && r.Alert != "" {
nodes = append(nodes, WrappedError{
err: errors.New("only one of 'record' and 'alert' must be set"),
node: &node.Record,
nodeAlt: &node.Alert,
})
}
if r.Record == "" && r.Alert == "" {
nodes = append(nodes, WrappedError{
err: errors.New("one of 'record' or 'alert' must be set"),
node: &node.Record,
nodeAlt: &node.Alert,
})
}
if r.Expr == "" {
nodes = append(nodes, WrappedError{
err: errors.New("field 'expr' must be set in rule"),
node: &node.Expr,
})
} else if _, err := parser.ParseExpr(r.Expr); err != nil {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("could not parse expression: %w", err),
node: &node.Expr,
})
}
if r.Record != "" {
if len(r.Annotations) > 0 {
nodes = append(nodes, WrappedError{
err: errors.New("invalid field 'annotations' in recording rule"),
node: &node.Record,
})
}
if r.For != 0 {
nodes = append(nodes, WrappedError{
err: errors.New("invalid field 'for' in recording rule"),
node: &node.Record,
})
}
if r.KeepFiringFor != 0 {
nodes = append(nodes, WrappedError{
err: errors.New("invalid field 'keep_firing_for' in recording rule"),
node: &node.Record,
})
}
if !nameValidationScheme.IsValidMetricName(r.Record) {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid recording rule name: %s", r.Record),
node: &node.Record,
})
}
		// While the record name may be valid UTF-8, it is a common mistake to put a
		// PromQL expression in it. Disallow the "{" and "}" characters.
if strings.Contains(r.Record, "{") || strings.Contains(r.Record, "}") {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record),
node: &node.Record,
})
}
}
for k, v := range r.Labels {
if !nameValidationScheme.IsValidLabelName(k) || k == model.MetricNameLabel {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid label name: %s", k),
})
}
if !model.LabelValue(v).IsValid() {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid label value: %s", v),
})
}
}
for k := range r.Annotations {
if !nameValidationScheme.IsValidLabelName(k) {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid annotation name: %s", k),
})
}
}
for _, err := range testTemplateParsing(r) {
nodes = append(nodes, WrappedError{err: err})
}
return nodes
}
// testTemplateParsing checks if the templates used in labels and annotations
// of the alerting rules are parsed correctly.
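// For example (see the defs below), "{{ $labels.instance }}" parses fine,
// while "{{ $label.instance }}" fails because only $labels, $externalLabels,
// $externalURL and $value are predefined.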
func testTemplateParsing(rl *Rule) (errs []error) {
if rl.Alert == "" {
// Not an alerting rule.
return errs
}
// Trying to parse templates.
tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, "", promql.Sample{})
defs := []string{
"{{$labels := .Labels}}",
"{{$externalLabels := .ExternalLabels}}",
"{{$externalURL := .ExternalURL}}",
"{{$value := .Value}}",
}
parseTest := func(text string) error {
tmpl := template.NewTemplateExpander(
context.TODO(),
strings.Join(append(defs, text), ""),
"__alert_"+rl.Alert,
tmplData,
model.Time(timestamp.FromTime(time.Now())),
nil,
nil,
nil,
)
return tmpl.ParseTest()
}
// Parsing Labels.
for k, val := range rl.Labels {
err := parseTest(val)
if err != nil {
errs = append(errs, fmt.Errorf("label %q: %w", k, err))
}
}
// Parsing Annotations.
for k, val := range rl.Annotations {
err := parseTest(val)
if err != nil {
errs = append(errs, fmt.Errorf("annotation %q: %w", k, err))
}
}
return errs
}
// Parse parses and validates a set of rules.
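// Illustrative usage:
//
//	groups, errs := Parse(content, false, model.UTF8Validation)
//	if len(errs) > 0 {
//		// handle parse or validation errors
//	}
//	_ = groups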
func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*RuleGroups, []error) {
var (
groups RuleGroups
node ruleGroups
errs []error
)
decoder := yaml.NewDecoder(bytes.NewReader(content))
if !ignoreUnknownFields {
decoder.KnownFields(true)
}
err := decoder.Decode(&groups)
// Ignore io.EOF which happens with empty input.
if err != nil && !errors.Is(err, io.EOF) {
errs = append(errs, err)
}
err = yaml.Unmarshal(content, &node)
if err != nil {
errs = append(errs, err)
}
if len(errs) > 0 {
return nil, errs
}
return &groups, groups.Validate(node, nameValidationScheme)
}
// ParseFile reads and parses rules from a file.
func ParseFile(file string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*RuleGroups, []error) {
b, err := os.ReadFile(file)
if err != nil {
return nil, []error{fmt.Errorf("%s: %w", file, err)}
}
rgs, errs := Parse(b, ignoreUnknownFields, nameValidationScheme)
for i := range errs {
errs[i] = fmt.Errorf("%s: %w", file, errs[i])
}
return rgs, errs
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/rulefmt/rulefmt_test.go | model/rulefmt/rulefmt_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rulefmt
import (
"errors"
"io"
"path/filepath"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestParseFileSuccess(t *testing.T) {
_, errs := ParseFile("testdata/test.yaml", false, model.UTF8Validation)
require.Empty(t, errs, "unexpected errors parsing file")
_, errs = ParseFile("testdata/utf-8_lname.good.yaml", false, model.UTF8Validation)
require.Empty(t, errs, "unexpected errors parsing file")
_, errs = ParseFile("testdata/utf-8_annotation.good.yaml", false, model.UTF8Validation)
require.Empty(t, errs, "unexpected errors parsing file")
_, errs = ParseFile("testdata/legacy_validation_annotation.good.yaml", false, model.LegacyValidation)
require.Empty(t, errs, "unexpected errors parsing file")
}
func TestParseFileSuccessWithAliases(t *testing.T) {
exprString := `sum without(instance) (rate(errors_total[5m]))
/
sum without(instance) (rate(requests_total[5m]))
`
rgs, errs := ParseFile("testdata/test_aliases.yaml", false, model.UTF8Validation)
require.Empty(t, errs, "unexpected errors parsing file")
for _, rg := range rgs.Groups {
require.Equal(t, "HighAlert", rg.Rules[0].Alert)
require.Equal(t, "critical", rg.Rules[0].Labels["severity"])
require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[0].Annotations["description"])
require.Equal(t, "new_metric", rg.Rules[1].Record)
require.Equal(t, "HighAlert", rg.Rules[2].Alert)
require.Equal(t, "critical", rg.Rules[2].Labels["severity"])
require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[2].Annotations["description"])
require.Equal(t, "HighAlert2", rg.Rules[3].Alert)
require.Equal(t, "critical", rg.Rules[3].Labels["severity"])
for _, rule := range rg.Rules {
require.Equal(t, exprString, rule.Expr)
}
}
}
func TestParseFileFailure(t *testing.T) {
for _, c := range []struct {
filename string
errMsg string
nameValidationScheme model.ValidationScheme
}{
{
filename: "duplicate_grp.bad.yaml",
errMsg: "groupname: \"yolo\" is repeated in the same file",
},
{
filename: "bad_expr.bad.yaml",
errMsg: "parse error",
},
{
filename: "record_and_alert.bad.yaml",
errMsg: "only one of 'record' and 'alert' must be set",
},
{
filename: "no_rec_alert.bad.yaml",
errMsg: "one of 'record' or 'alert' must be set",
},
{
filename: "noexpr.bad.yaml",
errMsg: "field 'expr' must be set in rule",
},
{
filename: "invalid_record_name.bad.yaml",
errMsg: "braces present in the recording rule name; should it be in expr?: strawberry{flavor=\"sweet\"}",
},
{
filename: "bad_field.bad.yaml",
errMsg: "field annotation not found",
},
{
filename: "invalid_label_name.bad.yaml",
errMsg: "invalid label name",
},
{
filename: "record_and_for.bad.yaml",
errMsg: "invalid field 'for' in recording rule",
},
{
filename: "record_and_keep_firing_for.bad.yaml",
errMsg: "invalid field 'keep_firing_for' in recording rule",
},
{
filename: "legacy_validation_annotation.bad.yaml",
nameValidationScheme: model.LegacyValidation,
errMsg: "invalid annotation name: ins-tance",
},
} {
t.Run(c.filename, func(t *testing.T) {
if c.nameValidationScheme == model.UnsetValidation {
c.nameValidationScheme = model.UTF8Validation
}
_, errs := ParseFile(filepath.Join("testdata", c.filename), false, c.nameValidationScheme)
require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename)
require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
})
}
}
func TestTemplateParsing(t *testing.T) {
tests := []struct {
ruleString string
shouldPass bool
}{
{
ruleString: `
groups:
- name: example
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: "page"
annotations:
summary: "Instance {{ $labels.instance }} down"
`,
shouldPass: true,
},
{
ruleString: `
groups:
- name: example
labels:
team: myteam
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: "page"
annotations:
summary: "Instance {{ $labels.instance }} down"
`,
shouldPass: true,
},
{
// `$label` instead of `$labels`.
ruleString: `
groups:
- name: example
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: "page"
annotations:
summary: "Instance {{ $label.instance }} down"
`,
shouldPass: false,
},
{
// `$this_is_wrong`.
ruleString: `
groups:
- name: example
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: "{{$this_is_wrong}}"
annotations:
summary: "Instance {{ $labels.instance }} down"
`,
shouldPass: false,
},
{
// `$labels.quantile * 100`.
ruleString: `
groups:
- name: example
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: "page"
annotations:
summary: "Instance {{ $labels.instance }} down"
description: "{{$labels.quantile * 100}}"
`,
shouldPass: false,
},
}
for _, tst := range tests {
rgs, errs := Parse([]byte(tst.ruleString), false, model.UTF8Validation)
require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0)
require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
}
}
func TestUniqueErrorNodes(t *testing.T) {
group := `
groups:
- name: example
rules:
- alert: InstanceDown
expr: up ===== 0
for: 5m
labels:
severity: "page"
annotations:
summary: "Instance {{ $labels.instance }} down"
- alert: InstanceUp
expr: up ===== 1
for: 5m
labels:
severity: "page"
annotations:
summary: "Instance {{ $labels.instance }} up"
`
_, errs := Parse([]byte(group), false, model.UTF8Validation)
require.Len(t, errs, 2, "Expected two errors")
var err00 *Error
require.ErrorAs(t, errs[0], &err00)
err0 := err00.Err.node
var err01 *Error
require.ErrorAs(t, errs[1], &err01)
err1 := err01.Err.node
require.NotEqual(t, err0, err1, "Error nodes should not be the same")
}
func TestError(t *testing.T) {
tests := []struct {
name string
error *Error
want string
}{
{
name: "with alternative node provided in WrappedError",
error: &Error{
Group: "some group",
Rule: 1,
RuleName: "some rule name",
Err: WrappedError{
err: errors.New("some error"),
node: &yaml.Node{
Line: 10,
Column: 20,
},
nodeAlt: &yaml.Node{
Line: 11,
Column: 21,
},
},
},
want: `10:20: 11:21: group "some group", rule 1, "some rule name": some error`,
},
{
name: "with node provided in WrappedError",
error: &Error{
Group: "some group",
Rule: 1,
RuleName: "some rule name",
Err: WrappedError{
err: errors.New("some error"),
node: &yaml.Node{
Line: 10,
Column: 20,
},
},
},
want: `10:20: group "some group", rule 1, "some rule name": some error`,
},
{
name: "with only err provided in WrappedError",
error: &Error{
Group: "some group",
Rule: 1,
RuleName: "some rule name",
Err: WrappedError{
err: errors.New("some error"),
},
},
want: `group "some group", rule 1, "some rule name": some error`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require.EqualError(t, tt.error, tt.want)
})
}
}
func TestWrappedError(t *testing.T) {
tests := []struct {
name string
wrappedError *WrappedError
want string
}{
{
name: "with alternative node provided",
wrappedError: &WrappedError{
err: errors.New("some error"),
node: &yaml.Node{
Line: 10,
Column: 20,
},
nodeAlt: &yaml.Node{
Line: 11,
Column: 21,
},
},
want: `10:20: 11:21: some error`,
},
{
name: "with node provided",
wrappedError: &WrappedError{
err: errors.New("some error"),
node: &yaml.Node{
Line: 10,
Column: 20,
},
},
want: `10:20: some error`,
},
{
name: "with only err provided",
wrappedError: &WrappedError{
err: errors.New("some error"),
},
want: `some error`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require.EqualError(t, tt.wrappedError, tt.want)
})
}
}
func TestErrorUnwrap(t *testing.T) {
err1 := errors.New("test error")
tests := []struct {
wrappedError *Error
unwrappedError error
}{
{
wrappedError: &Error{Err: WrappedError{err: err1}},
unwrappedError: err1,
},
{
wrappedError: &Error{Err: WrappedError{err: io.ErrClosedPipe}},
unwrappedError: io.ErrClosedPipe,
},
}
for _, tt := range tests {
t.Run(tt.wrappedError.Error(), func(t *testing.T) {
require.ErrorIs(t, tt.wrappedError, tt.unwrappedError)
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/relabel/relabel_test.go | model/relabel/relabel_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package relabel
import (
"encoding/json"
"strconv"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/testutil"
)
func TestRelabel(t *testing.T) {
tests := []struct {
input labels.Labels
relabel []*Config
output labels.Labels
drop bool
}{
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("f(.*)"),
TargetLabel: "d",
Separator: ";",
Replacement: "ch${1}-ch${1}",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
"b": "bar",
"c": "baz",
"d": "choo-choo",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a", "b"},
Regex: MustNewRegexp("f(.*);(.*)r"),
TargetLabel: "a",
Separator: ";",
Replacement: "b${1}${2}m", // boobam
Action: Replace,
},
{
SourceLabels: model.LabelNames{"c", "a"},
Regex: MustNewRegexp("(b).*b(.*)ba(.*)"),
TargetLabel: "d",
Separator: ";",
Replacement: "$1$2$2$3",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "boobam",
"b": "bar",
"c": "baz",
"d": "boooom",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp(".*o.*"),
Action: Drop,
}, {
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("f(.*)"),
TargetLabel: "d",
Separator: ";",
Replacement: "ch$1-ch$1",
Action: Replace,
},
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b": "bar",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp(".*o.*"),
Action: Drop,
},
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"a": "abc",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp(".*(b).*"),
TargetLabel: "d",
Separator: ";",
Replacement: "$1",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "abc",
"d": "b",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("no-match"),
Action: Drop,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("f|o"),
Action: Drop,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("no-match"),
Action: Keep,
},
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("f.*"),
Action: Keep,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
}),
},
{
// No replacement must be applied if there is no match.
input: labels.FromMap(map[string]string{
"a": "boo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("f"),
TargetLabel: "b",
Replacement: "bar",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "boo",
}),
},
{
// Blank replacement should delete the label.
input: labels.FromMap(map[string]string{
"a": "foo",
"f": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("(f).*"),
TargetLabel: "$1",
Replacement: "$2",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"c"},
TargetLabel: "d",
Separator: ";",
Action: HashMod,
Modulus: 1000,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
"b": "bar",
"c": "baz",
"d": "976",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo\nbar",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
TargetLabel: "b",
Separator: ";",
Action: HashMod,
Modulus: 1000,
},
},
output: labels.FromMap(map[string]string{
"a": "foo\nbar",
"b": "734",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b1": "bar",
"b2": "baz",
}),
relabel: []*Config{
{
Regex: MustNewRegexp("(b.*)"),
Replacement: "bar_${1}",
Action: LabelMap,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
"b1": "bar",
"b2": "baz",
"bar_b1": "bar",
"bar_b2": "baz",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"__meta_my_bar": "aaa",
"__meta_my_baz": "bbb",
"__meta_other": "ccc",
}),
relabel: []*Config{
{
Regex: MustNewRegexp("__meta_(my.*)"),
Replacement: "${1}",
Action: LabelMap,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
"__meta_my_bar": "aaa",
"__meta_my_baz": "bbb",
"__meta_other": "ccc",
"my_bar": "aaa",
"my_baz": "bbb",
}),
},
{ // valid case
input: labels.FromMap(map[string]string{
"a": "some-name-value",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${2}",
TargetLabel: "${1}",
},
},
output: labels.FromMap(map[string]string{
"a": "some-name-value",
"name": "value",
}),
},
		{ // Replacement "${3}" expands to "" (no third capture group), so the target label is deleted rather than set.
input: labels.FromMap(map[string]string{
"a": "some-name-value",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${3}",
TargetLabel: "${1}",
},
},
output: labels.FromMap(map[string]string{
"a": "some-name-value",
}),
},
		{ // target_label "${3}" expands to an empty (invalid) name, so nothing is written.
input: labels.FromMap(map[string]string{
"a": "some-name-0",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "${3}",
},
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "${3}",
},
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)(-[^,]+)"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "${3}",
},
},
output: labels.FromMap(map[string]string{
"a": "some-name-0",
}),
},
		{ // more complex, real-life-like use case
input: labels.FromMap(map[string]string{
"__meta_sd_tags": "path:/secret,job:some-job,label:foo=bar",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__meta_sd_tags"},
Regex: MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "__metrics_path__",
},
{
SourceLabels: model.LabelNames{"__meta_sd_tags"},
Regex: MustNewRegexp("(?:.+,|^)job:([^,]+).*"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "job",
},
{
SourceLabels: model.LabelNames{"__meta_sd_tags"},
Regex: MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"),
Action: Replace,
Replacement: "${2}",
TargetLabel: "${1}",
},
},
output: labels.FromMap(map[string]string{
"__meta_sd_tags": "path:/secret,job:some-job,label:foo=bar",
"__metrics_path__": "/secret",
"job": "some-job",
"foo": "bar",
}),
},
{ // From https://github.com/prometheus/prometheus/issues/12283
input: labels.FromMap(map[string]string{
"__meta_kubernetes_pod_container_port_name": "foo",
"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
}),
relabel: []*Config{
{
Regex: MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"),
Action: LabelDrop,
},
{
SourceLabels: model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"},
Regex: MustNewRegexp("(.+)"),
Action: Replace,
Replacement: "metrics",
TargetLabel: "__meta_kubernetes_pod_container_port_name",
},
{
SourceLabels: model.LabelNames{"__meta_kubernetes_pod_container_port_name"},
Regex: MustNewRegexp("^metrics$"),
Action: Keep,
},
},
output: labels.FromMap(map[string]string{
"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
"__meta_kubernetes_pod_container_port_name": "metrics",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b1": "bar",
"b2": "baz",
}),
relabel: []*Config{
{
Regex: MustNewRegexp("(b.*)"),
Action: LabelKeep,
},
},
output: labels.FromMap(map[string]string{
"b1": "bar",
"b2": "baz",
}),
},
{
input: labels.FromMap(map[string]string{
"a": "foo",
"b1": "bar",
"b2": "baz",
}),
relabel: []*Config{
{
Regex: MustNewRegexp("(b.*)"),
Action: LabelDrop,
},
},
output: labels.FromMap(map[string]string{
"a": "foo",
}),
},
{
input: labels.FromMap(map[string]string{
"foo": "bAr123Foo",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"foo"},
Action: Uppercase,
TargetLabel: "foo_uppercase",
},
{
SourceLabels: model.LabelNames{"foo"},
Action: Lowercase,
TargetLabel: "foo_lowercase",
},
},
output: labels.FromMap(map[string]string{
"foo": "bAr123Foo",
"foo_lowercase": "bar123foo",
"foo_uppercase": "BAR123FOO",
}),
},
{
input: labels.FromMap(map[string]string{
"__tmp_port": "1234",
"__port1": "1234",
"__port2": "5678",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__tmp_port"},
Action: KeepEqual,
TargetLabel: "__port1",
},
},
output: labels.FromMap(map[string]string{
"__tmp_port": "1234",
"__port1": "1234",
"__port2": "5678",
}),
},
{
input: labels.FromMap(map[string]string{
"__tmp_port": "1234",
"__port1": "1234",
"__port2": "5678",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__tmp_port"},
Action: DropEqual,
TargetLabel: "__port1",
},
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"__tmp_port": "1234",
"__port1": "1234",
"__port2": "5678",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__tmp_port"},
Action: DropEqual,
TargetLabel: "__port2",
},
},
output: labels.FromMap(map[string]string{
"__tmp_port": "1234",
"__port1": "1234",
"__port2": "5678",
}),
},
{
input: labels.FromMap(map[string]string{
"__tmp_port": "1234",
"__port1": "1234",
"__port2": "5678",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__tmp_port"},
Action: KeepEqual,
TargetLabel: "__port2",
},
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("line1.*line2"),
TargetLabel: "d",
Separator: ";",
Replacement: "match${1}",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
"d": "match",
}),
},
{ // Replace on source label with UTF-8 characters.
input: labels.FromMap(map[string]string{
"utf-8.label": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"utf-8.label"},
Regex: MustNewRegexp("line1.*line2"),
TargetLabel: "d",
Separator: ";",
Replacement: `match${1}`,
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"utf-8.label": "line1\nline2",
"b": "bar",
"c": "baz",
"d": "match",
}),
},
{ // Replace targetLabel with UTF-8 characters.
input: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("line1.*line2"),
TargetLabel: "utf-8.label",
Separator: ";",
Replacement: `match${1}`,
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
"utf-8.label": "match",
}),
},
{ // Replace targetLabel with UTF-8 characters and $variable.
input: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("line1.*line2"),
TargetLabel: "utf-8.label${1}",
Separator: ";",
Replacement: "match${1}",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
"utf-8.label": "match",
}),
},
{ // Replace targetLabel with UTF-8 characters and various $var styles.
input: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("(line1).*(?<second>line2)"),
TargetLabel: "label1_${1}",
Separator: ";",
Replacement: "val_${second}",
Action: Replace,
},
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("(line1).*(?<second>line2)"),
TargetLabel: "label2_$1",
Separator: ";",
Replacement: "val_$second",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
"label1_line1": "val_line2",
"label2_line1": "val_line2",
}),
},
{
input: labels.FromMap(map[string]string{
"__name__": "http_requests_total",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__name__"},
Regex: MustNewRegexp(".*_total$"),
TargetLabel: "__type__",
Replacement: "counter",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"__name__": "http_requests_total",
"__type__": "counter",
}),
},
{
input: labels.FromMap(map[string]string{
"__name__": "disk_usage_bytes",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"__name__"},
Regex: MustNewRegexp(".*_bytes$"),
TargetLabel: "__unit__",
Replacement: "bytes",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"__name__": "disk_usage_bytes",
"__unit__": "bytes",
}),
},
}
for _, test := range tests {
// Setting default fields, mimicking the behaviour in Prometheus.
for _, cfg := range test.relabel {
if cfg.Action == "" {
cfg.Action = DefaultRelabelConfig.Action
}
if cfg.Separator == "" {
cfg.Separator = DefaultRelabelConfig.Separator
}
if cfg.Regex.Regexp == nil || cfg.Regex.String() == "" {
cfg.Regex = DefaultRelabelConfig.Regex
}
if cfg.Replacement == "" {
cfg.Replacement = DefaultRelabelConfig.Replacement
}
cfg.NameValidationScheme = model.UTF8Validation
require.NoError(t, cfg.Validate(model.UTF8Validation))
}
res, keep := Process(test.input, test.relabel...)
require.Equal(t, !test.drop, keep)
if keep {
testutil.RequireEqual(t, test.output, res)
}
}
}
func TestRelabelValidate(t *testing.T) {
tests := []struct {
config Config
expected string
}{
{
config: Config{
NameValidationScheme: model.UTF8Validation,
},
expected: `relabel action cannot be empty`,
},
{
config: Config{
Action: Replace,
NameValidationScheme: model.UTF8Validation,
},
expected: `requires 'target_label' value`,
},
{
config: Config{
Action: Lowercase,
NameValidationScheme: model.UTF8Validation,
},
expected: `requires 'target_label' value`,
},
{
config: Config{
Action: Lowercase,
Replacement: DefaultRelabelConfig.Replacement,
TargetLabel: "${3}", // With UTF-8 naming, this is now a legal relabel rule.
NameValidationScheme: model.UTF8Validation,
},
},
{
config: Config{
Action: Lowercase,
Replacement: DefaultRelabelConfig.Replacement,
TargetLabel: "${3}", // Fails with legacy validation
NameValidationScheme: model.LegacyValidation,
},
expected: "\"${3}\" is invalid 'target_label' for lowercase action",
},
{
config: Config{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "${3}",
NameValidationScheme: model.UTF8Validation,
},
},
{
config: Config{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "0${3}", // With UTF-8 naming this targets a valid label.
NameValidationScheme: model.UTF8Validation,
},
},
{
config: Config{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
Action: Replace,
Replacement: "${1}",
TargetLabel: "-${3}", // With UTF-8 naming this targets a valid label.
NameValidationScheme: model.UTF8Validation,
},
},
{
config: Config{
Regex: MustNewRegexp("__meta_kubernetes_pod_label_(strimzi_io_.+)"),
Action: LabelMap,
Replacement: "$1",
NameValidationScheme: model.LegacyValidation,
},
},
{
config: Config{
Regex: MustNewRegexp("__meta_(.+)"),
Action: LabelMap,
Replacement: "${1}",
NameValidationScheme: model.LegacyValidation,
},
},
}
for i, test := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
err := test.config.Validate(model.UTF8Validation)
if test.expected == "" {
require.NoError(t, err)
} else {
require.ErrorContains(t, err, test.expected)
}
})
}
}
func TestTargetLabelLegacyValidity(t *testing.T) {
for _, test := range []struct {
str string
valid bool
}{
{"-label", false},
{"label", true},
{"label${1}", true},
{"${1}label", true},
{"${1}", true},
{"${1}label", true},
{"${", false},
{"$", false},
{"${}", false},
{"foo${", false},
{"$1", true},
{"asd$2asd", true},
{"-foo${1}bar-", false},
{"bar.foo${1}bar", false},
{"_${1}_", true},
{"foo${bar}foo", true},
} {
require.Equal(t, test.valid, relabelTargetLegacy.Match([]byte(test.str)),
"Expected %q to be %v", test.str, test.valid)
}
}
func BenchmarkRelabel(b *testing.B) {
tests := []struct {
name string
lbls labels.Labels
config string
cfgs []*Config
}{
{
name: "example", // From prometheus/config/testdata/conf.good.yml.
config: `
- source_labels: [job, __meta_dns_name]
regex: "(.*)some-[regex]"
target_label: job
replacement: foo-${1}
# action defaults to 'replace'
- source_labels: [abc]
target_label: cde
- replacement: static
target_label: abc
- regex:
replacement: static
target_label: abc`,
lbls: labels.FromStrings("__meta_dns_name", "example-some-x.com", "abc", "def", "job", "foo"),
},
{
name: "kubernetes",
config: `
- source_labels:
- __meta_kubernetes_pod_container_port_name
regex: .*-metrics
action: keep
- source_labels:
- __meta_kubernetes_pod_label_name
action: drop
regex: ""
- source_labels:
- __meta_kubernetes_pod_phase
regex: Succeeded|Failed
action: drop
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
regex: "false"
action: drop
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scheme
target_label: __scheme__
regex: (https?)
replacement: $1
action: replace
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
regex: (.+)
replacement: $1
action: replace
- source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
regex: (.+?)(\:\d+)?;(\d+)
replacement: $1:$3
action: replace
- regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
replacement: __param_$1
action: labelmap
- regex: __meta_kubernetes_pod_label_prometheus_io_label_(.+)
action: labelmap
- regex: __meta_kubernetes_pod_annotation_prometheus_io_label_(.+)
action: labelmap
- source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_pod_label_name
separator: /
target_label: job
replacement: $1
action: replace
- source_labels:
- __meta_kubernetes_namespace
target_label: namespace
action: replace
- source_labels:
- __meta_kubernetes_pod_name
target_label: pod
action: replace
- source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
action: replace
- source_labels:
- __meta_kubernetes_pod_name
- __meta_kubernetes_pod_container_name
- __meta_kubernetes_pod_container_port_name
separator: ':'
target_label: instance
action: replace
- target_label: cluster
replacement: dev-us-central-0
- source_labels:
- __meta_kubernetes_namespace
regex: hosted-grafana
action: drop
- source_labels:
- __address__
target_label: __tmp_hash
modulus: 3
action: hashmod
- source_labels:
- __tmp_hash
regex: ^0$
action: keep
- regex: __tmp_hash
action: labeldrop`,
lbls: labels.FromStrings(
"__address__", "10.132.183.40:80",
"__meta_kubernetes_namespace", "loki-boltdb-shipper",
"__meta_kubernetes_pod_annotation_promtail_loki_boltdb_shipper_hash", "50523b9759094a144adcec2eae0aa4ad",
"__meta_kubernetes_pod_annotationpresent_promtail_loki_boltdb_shipper_hash", "true",
"__meta_kubernetes_pod_container_init", "false",
"__meta_kubernetes_pod_container_name", "promtail",
"__meta_kubernetes_pod_container_port_name", "http-metrics",
"__meta_kubernetes_pod_container_port_number", "80",
"__meta_kubernetes_pod_container_port_protocol", "TCP",
"__meta_kubernetes_pod_controller_kind", "DaemonSet",
"__meta_kubernetes_pod_controller_name", "promtail-loki-boltdb-shipper",
"__meta_kubernetes_pod_host_ip", "10.128.0.178",
"__meta_kubernetes_pod_ip", "10.132.183.40",
"__meta_kubernetes_pod_label_controller_revision_hash", "555b77cd7d",
"__meta_kubernetes_pod_label_name", "promtail-loki-boltdb-shipper",
"__meta_kubernetes_pod_label_pod_template_generation", "45",
"__meta_kubernetes_pod_labelpresent_controller_revision_hash", "true",
"__meta_kubernetes_pod_labelpresent_name", "true",
"__meta_kubernetes_pod_labelpresent_pod_template_generation", "true",
"__meta_kubernetes_pod_name", "promtail-loki-boltdb-shipper-jgtr7",
"__meta_kubernetes_pod_node_name", "gke-dev-us-central-0-main-n2s8-2-14d53341-9hkr",
"__meta_kubernetes_pod_phase", "Running",
"__meta_kubernetes_pod_ready", "true",
"__meta_kubernetes_pod_uid", "4c586419-7f6c-448d-aeec-ca4fa5b05e60",
"__metrics_path__", "/metrics",
"__scheme__", "http",
"__scrape_interval__", "15s",
"__scrape_timeout__", "10s",
"job", "kubernetes-pods"),
},
{
name: "static label pair",
config: `
- replacement: wwwwww
target_label: wwwwww
- replacement: yyyyyyyyyyyy
target_label: xxxxxxxxx
- replacement: xxxxxxxxx
target_label: yyyyyyyyyyyy
- source_labels: ["something"]
target_label: with_source_labels
replacement: value
- replacement: dropped
target_label: ${0}
- replacement: ${0}
target_label: dropped`,
lbls: labels.FromStrings(
"abcdefg01", "hijklmn1",
"abcdefg02", "hijklmn2",
"abcdefg03", "hijklmn3",
"abcdefg04", "hijklmn4",
"abcdefg05", "hijklmn5",
"abcdefg06", "hijklmn6",
"abcdefg07", "hijklmn7",
"abcdefg08", "hijklmn8",
"job", "foo",
),
},
}
for i := range tests {
err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs)
require.NoError(b, err)
}
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
for b.Loop() {
_, _ = Process(tt.lbls, tt.cfgs...)
}
})
}
}
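// Note: b.Loop (introduced in Go 1.24) replaces the classic
// `for i := 0; i < b.N; i++` pattern and, per its documentation, keeps the
// compiler from optimizing the benchmarked loop body away.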
func TestConfig_UnmarshalThenMarshal(t *testing.T) {
tests := []struct {
name string
inputYaml string
}{
{
name: "Values provided",
inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
regex: \\d+
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: replace
`,
},
{
name: "No regex provided",
inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: keepequal
`,
},
{
name: "Default regex provided",
inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
regex: (.*)
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: replace
`,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
unmarshalled := Config{}
err := yaml.Unmarshal([]byte(test.inputYaml), &unmarshalled)
require.NoError(t, err)
marshalled, err := yaml.Marshal(&unmarshalled)
require.NoError(t, err)
require.Equal(t, test.inputYaml, string(marshalled))
})
}
}
func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) {
var zero Regexp
marshalled, err := yaml.Marshal(&zero)
require.NoError(t, err)
require.Equal(t, "null\n", string(marshalled))
var unmarshalled Regexp
err = yaml.Unmarshal(marshalled, &unmarshalled)
require.NoError(t, err)
require.Nil(t, unmarshalled.Regexp)
}
func TestRegexp_JSONUnmarshalThenMarshal(t *testing.T) {
tests := []struct {
name string
input string
}{
{
name: "Empty regex",
input: `{"regex":""}`,
},
{
name: "string literal",
input: `{"regex":"foo"}`,
},
{
name: "regex",
input: `{"regex":".*foo.*"}`,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var unmarshalled Config
err := json.Unmarshal([]byte(test.input), &unmarshalled)
require.NoError(t, err)
marshalled, err := json.Marshal(&unmarshalled)
require.NoError(t, err)
require.Equal(t, test.input, string(marshalled))
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/model/relabel/relabel.go | model/relabel/relabel.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package relabel
import (
"crypto/md5"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"github.com/grafana/regexp"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
var (
	// relabelTargetLegacy allows targeting labels with the legacy Prometheus character set, plus ${<var>} variables for dynamic characters taken from the source metrics.
relabelTargetLegacy = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
DefaultRelabelConfig = Config{
Action: Replace,
Separator: ";",
Regex: MustNewRegexp("(.*)"),
Replacement: "$1",
}
)
// Action is the action to be performed on relabeling.
type Action string
const (
// Replace performs a regex replacement.
Replace Action = "replace"
// Keep drops targets for which the input does not match the regex.
Keep Action = "keep"
// Drop drops targets for which the input does match the regex.
Drop Action = "drop"
// KeepEqual drops targets for which the input does not match the target.
KeepEqual Action = "keepequal"
// DropEqual drops targets for which the input does match the target.
DropEqual Action = "dropequal"
// HashMod sets a label to the modulus of a hash of labels.
HashMod Action = "hashmod"
// LabelMap copies labels to other labelnames based on a regex.
LabelMap Action = "labelmap"
// LabelDrop drops any label matching the regex.
LabelDrop Action = "labeldrop"
// LabelKeep drops any label not matching the regex.
LabelKeep Action = "labelkeep"
// Lowercase maps input letters to their lower case.
Lowercase Action = "lowercase"
// Uppercase maps input letters to their upper case.
Uppercase Action = "uppercase"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (a *Action) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
switch act := Action(strings.ToLower(s)); act {
case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase, KeepEqual, DropEqual:
*a = act
return nil
}
return fmt.Errorf("unknown relabel action %q", s)
}
// Config is the configuration for relabeling of target label sets.
type Config struct {
// A list of labels from which values are taken and concatenated
// with the configured separator in order.
SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"source_labels,omitempty"`
// Separator is the string between concatenated values from the source labels.
Separator string `yaml:"separator,omitempty" json:"separator,omitempty"`
// Regex against which the concatenation is matched.
Regex Regexp `yaml:"regex,omitempty" json:"regex,omitempty"`
// Modulus to take of the hash of concatenated values from the source labels.
Modulus uint64 `yaml:"modulus,omitempty" json:"modulus,omitempty"`
// TargetLabel is the label to which the resulting string is written in a replacement.
// Regexp interpolation is allowed for the replace action.
TargetLabel string `yaml:"target_label,omitempty" json:"target_label,omitempty"`
// Replacement is the regex replacement pattern to be used.
Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"`
// Action is the action to be performed for the relabeling.
Action Action `yaml:"action,omitempty" json:"action,omitempty"`
// NameValidationScheme to use when validating labels.
NameValidationScheme model.ValidationScheme `yaml:"-" json:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRelabelConfig
type plain Config
if err := unmarshal((*plain)(c)); err != nil {
return err
}
if c.Regex.Regexp == nil {
c.Regex = MustNewRegexp("")
}
return nil
}
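// Illustrative sketch (the YAML below is an assumption, not taken from this
// file): because unmarshalling starts from DefaultRelabelConfig, a minimal
// rule such as
//
//	- source_labels: [__name__]
//	  target_label: metric_copy
//
// comes out with action=replace, separator=";", regex=(.*) and replacement=$1.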
func (c *Config) Validate(nameValidationScheme model.ValidationScheme) error {
if c.Action == "" {
return errors.New("relabel action cannot be empty")
}
if c.Modulus == 0 && c.Action == HashMod {
return errors.New("relabel configuration for hashmod requires non-zero modulus")
}
if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
}
// Relabel config validation scheme matches global if left blank.
switch c.NameValidationScheme {
case model.LegacyValidation, model.UTF8Validation:
case model.UnsetValidation:
c.NameValidationScheme = nameValidationScheme
default:
return fmt.Errorf("unknown relabel config name validation method specified, must be either '', 'legacy' or 'utf8', got %s", c.NameValidationScheme)
}
if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !c.NameValidationScheme.IsValidLabelName(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
isValidLabelNameWithRegexVarFn := func(value string) bool {
		// UTF-8 allows ${} characters, so standard validation allows $variables by default.
		// TODO(bwplotka): Users of relabelling cannot currently put $ and ${<...>} characters in metric names or values.
		// Design an escaping mechanism to allow that once a valid use case appears.
switch c.NameValidationScheme {
case model.UTF8Validation:
return c.NameValidationScheme.IsValidLabelName(value)
default:
// For legacy validation, use the legacy regex that allows $variables.
return relabelTargetLegacy.MatchString(value)
}
}
if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !c.NameValidationScheme.IsValidLabelName(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement {
return fmt.Errorf("'replacement' can not be set for %s action", c.Action)
}
if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) {
return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
}
if c.Action == HashMod && !c.NameValidationScheme.IsValidLabelName(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
if c.Action == DropEqual || c.Action == KeepEqual {
if c.Regex != DefaultRelabelConfig.Regex ||
c.Modulus != DefaultRelabelConfig.Modulus ||
c.Separator != DefaultRelabelConfig.Separator ||
c.Replacement != DefaultRelabelConfig.Replacement {
return fmt.Errorf("%s action requires only 'source_labels' and `target_label`, and no other fields", c.Action)
}
}
if c.Action == LabelDrop || c.Action == LabelKeep {
if c.SourceLabels != nil ||
c.TargetLabel != DefaultRelabelConfig.TargetLabel ||
c.Modulus != DefaultRelabelConfig.Modulus ||
c.Separator != DefaultRelabelConfig.Separator ||
c.Replacement != DefaultRelabelConfig.Replacement {
return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action)
}
}
return nil
}
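// A hedged sketch of Validate tripping one of the checks above (the config
// values are illustrative assumptions):
//
//	cfg := Config{Action: HashMod, TargetLabel: "shard"}
//	err := cfg.Validate(model.UTF8Validation)
//	// err: "relabel configuration for hashmod requires non-zero modulus"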
// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable.
type Regexp struct {
*regexp.Regexp
}
// NewRegexp creates a new anchored Regexp and returns an error if the
// passed-in regular expression does not compile.
func NewRegexp(s string) (Regexp, error) {
regex, err := regexp.Compile("^(?s:" + s + ")$")
return Regexp{Regexp: regex}, err
}
// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile.
func MustNewRegexp(s string) Regexp {
re, err := NewRegexp(s)
if err != nil {
panic(err)
}
return re
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (re *Regexp) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
r, err := NewRegexp(s)
if err != nil {
return err
}
*re = r
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.
func (re Regexp) MarshalYAML() (any, error) {
if re.String() != "" {
return re.String(), nil
}
return nil, nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (re *Regexp) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
r, err := NewRegexp(s)
if err != nil {
return err
}
*re = r
return nil
}
// MarshalJSON implements the json.Marshaler interface.
func (re Regexp) MarshalJSON() ([]byte, error) {
return json.Marshal(re.String())
}
// IsZero implements the yaml.IsZeroer interface.
func (re Regexp) IsZero() bool {
return re.Regexp == DefaultRelabelConfig.Regex.Regexp
}
// String returns the original string used to compile the regular expression.
func (re Regexp) String() string {
if re.Regexp == nil {
return ""
}
str := re.Regexp.String()
// Trim the anchor `^(?s:` prefix and `)$` suffix.
return str[5 : len(str)-2]
}
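// Sketch of the NewRegexp/String round trip (pattern chosen for illustration):
//
//	re := MustNewRegexp("foo.*")
//	re.Regexp.String() // "^(?s:foo.*)$" (anchored; '.' also matches newlines)
//	re.String()        // "foo.*" (the original, user-facing pattern)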
// Process returns a relabeled version of the given label set. The relabel configurations
// are applied in order of input.
// There are circumstances where Process will modify the input label set.
// If you want to avoid issues with the input label set being modified, at the cost of
// higher memory usage, you can use lbls.Copy().
// If a label set is dropped, EmptyLabels and false is returned.
func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
lb := labels.NewBuilder(lbls)
if !ProcessBuilder(lb, cfgs...) {
return labels.EmptyLabels(), false
}
return lb.Labels(), true
}
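// Minimal usage sketch for Process (labels and config are assumptions chosen
// for illustration):
//
//	cfg := &Config{
//		SourceLabels:         model.LabelNames{"job"},
//		Separator:            ";",
//		Regex:                MustNewRegexp("(.*)"),
//		TargetLabel:          "job_copy",
//		Replacement:          "$1",
//		Action:               Replace,
//		NameValidationScheme: model.UTF8Validation,
//	}
//	out, keep := Process(labels.FromStrings("job", "api"), cfg)
//	// keep == true; out additionally carries job_copy="api".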
// ProcessBuilder is like Process, but the caller passes a labels.Builder
// containing the initial set of labels, which is mutated by the rules.
func ProcessBuilder(lb *labels.Builder, cfgs ...*Config) (keep bool) {
for _, cfg := range cfgs {
keep = relabel(cfg, lb)
if !keep {
return false
}
}
return true
}
func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
var va [16]string
values := va[:0]
if len(cfg.SourceLabels) > cap(values) {
values = make([]string, 0, len(cfg.SourceLabels))
}
for _, ln := range cfg.SourceLabels {
values = append(values, lb.Get(string(ln)))
}
val := strings.Join(values, cfg.Separator)
switch cfg.Action {
case Drop:
if cfg.Regex.MatchString(val) {
return false
}
case Keep:
if !cfg.Regex.MatchString(val) {
return false
}
case DropEqual:
if lb.Get(cfg.TargetLabel) == val {
return false
}
case KeepEqual:
if lb.Get(cfg.TargetLabel) != val {
return false
}
case Replace:
// Fast path to add or delete label pair.
if val == "" && cfg.Regex == DefaultRelabelConfig.Regex &&
!varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) {
lb.Set(cfg.TargetLabel, cfg.Replacement)
break
}
indexes := cfg.Regex.FindStringSubmatchIndex(val)
		// If there is no match, no replacement takes place.
if indexes == nil {
break
}
target := string(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes))
if !cfg.NameValidationScheme.IsValidLabelName(target) {
break
}
res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes)
if len(res) == 0 {
lb.Del(target)
break
}
lb.Set(target, string(res))
case Lowercase:
lb.Set(cfg.TargetLabel, strings.ToLower(val))
case Uppercase:
lb.Set(cfg.TargetLabel, strings.ToUpper(val))
case HashMod:
hash := md5.Sum([]byte(val))
// Use only the last 8 bytes of the hash to give the same result as earlier versions of this code.
mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus
lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10))
case LabelMap:
lb.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) {
res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement)
lb.Set(res, l.Value)
}
})
case LabelDrop:
lb.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) {
lb.Del(l.Name)
}
})
case LabelKeep:
lb.Range(func(l labels.Label) {
if !cfg.Regex.MatchString(l.Name) {
lb.Del(l.Name)
}
})
default:
panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action))
}
return true
}
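// Worked example for the HashMod branch above, matching the "976" fixture in
// relabel_test.go (only the last 8 hash bytes feed the modulus):
//
//	sum := md5.Sum([]byte("baz"))
//	binary.BigEndian.Uint64(sum[8:]) % 1000 // == 976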
func varInRegexTemplate(template string) bool {
return strings.Contains(template, "$")
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tracing/tracing_test.go | tracing/tracing_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import (
"testing"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace/noop"
"github.com/prometheus/prometheus/config"
)
func TestInstallingNewTracerProvider(t *testing.T) {
tpBefore := otel.GetTracerProvider()
m := NewManager(promslog.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
require.NotEqual(t, tpBefore, otel.GetTracerProvider())
}
func TestReinstallingTracerProvider(t *testing.T) {
m := NewManager(promslog.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
Headers: map[string]string{"foo": "bar"},
},
}
require.NoError(t, m.ApplyConfig(&cfg))
tpFirstConfig := otel.GetTracerProvider()
// Trying to apply the same config should not reinstall provider.
require.NoError(t, m.ApplyConfig(&cfg))
require.Equal(t, tpFirstConfig, otel.GetTracerProvider())
cfg2 := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientHTTP,
Headers: map[string]string{"bar": "foo"},
},
}
require.NoError(t, m.ApplyConfig(&cfg2))
require.NotEqual(t, tpFirstConfig, otel.GetTracerProvider())
tpSecondConfig := otel.GetTracerProvider()
// Setting previously unset option should reinstall provider.
cfg2.TracingConfig.Compression = "gzip"
require.NoError(t, m.ApplyConfig(&cfg2))
require.NotEqual(t, tpSecondConfig, otel.GetTracerProvider())
}
func TestReinstallingTracerProviderWithTLS(t *testing.T) {
m := NewManager(promslog.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
TLSConfig: config_util.TLSConfig{
CAFile: "testdata/ca.cer",
},
},
}
require.NoError(t, m.ApplyConfig(&cfg))
tpFirstConfig := otel.GetTracerProvider()
// Trying to apply the same config with TLS should reinstall provider.
require.NoError(t, m.ApplyConfig(&cfg))
require.NotEqual(t, tpFirstConfig, otel.GetTracerProvider())
}
func TestUninstallingTracerProvider(t *testing.T) {
m := NewManager(promslog.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
require.NotEqual(t, noop.NewTracerProvider(), otel.GetTracerProvider())
// Uninstall by passing empty config.
cfg2 := config.Config{
TracingConfig: config.TracingConfig{},
}
require.NoError(t, m.ApplyConfig(&cfg2))
// Make sure we get a no-op tracer provider after uninstallation.
require.Equal(t, noop.NewTracerProvider(), otel.GetTracerProvider())
}
func TestTracerProviderShutdown(t *testing.T) {
m := NewManager(promslog.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
m.Stop()
// Check if we closed the done channel.
_, ok := <-m.done
require.False(t, ok)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tracing/tracing.go | tracing/tracing.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import (
"context"
"fmt"
"log/slog"
"reflect"
"time"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/version"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
"google.golang.org/grpc/credentials"
"github.com/prometheus/prometheus/config"
)
const serviceName = "prometheus"
// Manager is capable of building, (re)installing and shutting down
// the tracer provider.
type Manager struct {
logger *slog.Logger
done chan struct{}
config config.TracingConfig
shutdownFunc func() error
}
// NewManager creates a new tracing manager.
func NewManager(logger *slog.Logger) *Manager {
return &Manager{
logger: logger,
done: make(chan struct{}),
}
}
// Run starts the tracing manager. It registers the global text map propagator and error handler.
// It is blocking.
func (m *Manager) Run() {
otel.SetTextMapPropagator(propagation.TraceContext{})
otel.SetErrorHandler(otelErrHandler(func(err error) {
m.logger.Error("OpenTelemetry handler returned an error", "err", err.Error())
}))
<-m.done
}
// ApplyConfig takes care of refreshing the tracing configuration by shutting down
// the current tracer provider (if any is registered) and installing a new one.
func (m *Manager) ApplyConfig(cfg *config.Config) error {
// Update only if a config change is detected. If TLS configuration is
// set, we have to restart the manager to make sure that new TLS
// certificates are picked up.
var blankTLSConfig config_util.TLSConfig
if reflect.DeepEqual(m.config, cfg.TracingConfig) && m.config.TLSConfig == blankTLSConfig {
return nil
}
if m.shutdownFunc != nil {
if err := m.shutdownFunc(); err != nil {
return fmt.Errorf("failed to shut down the tracer provider: %w", err)
}
}
// If no endpoint is set, assume tracing should be disabled.
if cfg.TracingConfig.Endpoint == "" {
m.config = cfg.TracingConfig
m.shutdownFunc = nil
otel.SetTracerProvider(noop.NewTracerProvider())
m.logger.Info("Tracing provider uninstalled.")
return nil
}
tp, shutdownFunc, err := buildTracerProvider(context.Background(), cfg.TracingConfig)
if err != nil {
return fmt.Errorf("failed to install a new tracer provider: %w", err)
}
m.shutdownFunc = shutdownFunc
m.config = cfg.TracingConfig
otel.SetTracerProvider(tp)
m.logger.Info("Successfully installed a new tracer provider.")
return nil
}
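// Hedged lifecycle sketch (the endpoint is an assumption; the pattern mirrors
// tracing_test.go):
//
//	m := NewManager(logger)
//	go m.Run() // blocks until Stop closes m.done
//	err := m.ApplyConfig(&config.Config{TracingConfig: config.TracingConfig{
//		Endpoint:   "localhost:4317",
//		ClientType: config.TracingClientGRPC,
//	}})
//	...
//	m.Stop()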
// Stop gracefully shuts down the tracer provider and stops the tracing manager.
func (m *Manager) Stop() {
defer close(m.done)
if m.shutdownFunc == nil {
return
}
if err := m.shutdownFunc(); err != nil {
m.logger.Error("failed to shut down the tracer provider", "err", err)
}
m.logger.Info("Tracing manager stopped")
}
type otelErrHandler func(err error)
func (o otelErrHandler) Handle(err error) {
o(err)
}
// buildTracerProvider returns a new tracer provider ready for installation, together
// with a shutdown function.
func buildTracerProvider(ctx context.Context, tracingCfg config.TracingConfig) (trace.TracerProvider, func() error, error) {
client, err := getClient(tracingCfg)
if err != nil {
return nil, nil, err
}
exp, err := otlptrace.New(ctx, client)
if err != nil {
return nil, nil, err
}
// Create a resource describing the service and the runtime.
res, err := resource.New(
ctx,
resource.WithSchemaURL(semconv.SchemaURL),
resource.WithAttributes(
semconv.ServiceNameKey.String(serviceName),
semconv.ServiceVersionKey.String(version.Version),
),
resource.WithProcessRuntimeDescription(),
resource.WithTelemetrySDK(),
)
if err != nil {
return nil, nil, err
}
tp := tracesdk.NewTracerProvider(
tracesdk.WithBatcher(exp),
tracesdk.WithSampler(tracesdk.ParentBased(
tracesdk.TraceIDRatioBased(tracingCfg.SamplingFraction),
)),
tracesdk.WithResource(res),
)
	return tp, func() error {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		return tp.Shutdown(ctx)
	}, nil
}
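// Sampling note: ParentBased honors an existing parent span's sampling
// decision and applies TraceIDRatioBased(SamplingFraction) only to root spans.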
// getClient returns an appropriate OTLP client (either gRPC or HTTP), based
// on the provided tracing configuration.
func getClient(tracingCfg config.TracingConfig) (otlptrace.Client, error) {
var client otlptrace.Client
switch tracingCfg.ClientType {
case config.TracingClientGRPC:
opts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(tracingCfg.Endpoint)}
if tracingCfg.Insecure {
opts = append(opts, otlptracegrpc.WithInsecure())
} else {
// Use of TLS Credentials forces the use of TLS. Therefore it can
// only be set when `insecure` is set to false.
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
}
if tracingCfg.Compression != "" {
opts = append(opts, otlptracegrpc.WithCompressor(tracingCfg.Compression))
}
if len(tracingCfg.Headers) != 0 {
opts = append(opts, otlptracegrpc.WithHeaders(tracingCfg.Headers))
}
if tracingCfg.Timeout != 0 {
opts = append(opts, otlptracegrpc.WithTimeout(time.Duration(tracingCfg.Timeout)))
}
client = otlptracegrpc.NewClient(opts...)
case config.TracingClientHTTP:
opts := []otlptracehttp.Option{otlptracehttp.WithEndpoint(tracingCfg.Endpoint)}
if tracingCfg.Insecure {
opts = append(opts, otlptracehttp.WithInsecure())
}
// Currently, OTEL supports only gzip compression.
if tracingCfg.Compression == config.GzipCompression {
opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression))
}
if len(tracingCfg.Headers) != 0 {
opts = append(opts, otlptracehttp.WithHeaders(tracingCfg.Headers))
}
if tracingCfg.Timeout != 0 {
opts = append(opts, otlptracehttp.WithTimeout(time.Duration(tracingCfg.Timeout)))
}
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracehttp.WithTLSClientConfig(tlsConf))
client = otlptracehttp.NewClient(opts...)
}
return client, nil
}
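// Note the asymmetry above: the gRPC path forwards any configured compressor
// name via WithCompressor, while the HTTP path only maps config.GzipCompression,
// per the "OTEL supports only gzip" comment in that branch.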
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/manager_test.go | rules/manager_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"fmt"
"io/fs"
"maps"
"math"
"os"
"path"
"path/filepath"
"slices"
"sort"
"strconv"
"sync"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
prom_testutil "github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
prom_testutil.TolerantVerifyLeak(m)
}
func TestAlertingRule(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
time.Minute,
0,
labels.FromStrings("severity", "{{\"c\"}}ritical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
result := promql.Vector{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "pending",
"group", "canary",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "pending",
"group", "canary",
"instance", "1",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "firing",
"group", "canary",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "firing",
"group", "canary",
"instance", "1",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
}
baseTime := time.Unix(0, 0)
tests := []struct {
time time.Duration
result promql.Vector
}{
{
time: 0,
result: result[:2],
},
{
time: 5 * time.Minute,
result: result[2:],
},
{
time: 10 * time.Minute,
result: result[2:3],
},
{
time: 15 * time.Minute,
result: nil,
},
{
time: 20 * time.Minute,
result: nil,
},
{
time: 25 * time.Minute,
result: result[:1],
},
{
time: 30 * time.Minute,
result: result[2:3],
},
}
ng := testEngine(t)
for i, test := range tests {
t.Logf("case %d", i)
evalTime := baseTime.Add(test.time)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime)
}
require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
})
prom_testutil.RequireEqual(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}
func TestForStateAddSamples(t *testing.T) {
for _, queryOffset := range []time.Duration{0, time.Minute} {
t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
time.Minute,
0,
labels.FromStrings("severity", "{{\"c\"}}ritical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
result := promql.Vector{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow",
"group", "canary",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow",
"group", "canary",
"instance", "1",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow",
"group", "canary",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow",
"group", "canary",
"instance", "1",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
}
baseTime := time.Unix(0, 0)
tests := []struct {
time time.Duration
result promql.Vector
				persistThisTime bool // If true, this 'time' becomes the persisted 'for' state timestamp.
}{
{
time: 0,
result: append(promql.Vector{}, result[:2]...),
persistThisTime: true,
},
{
time: 5 * time.Minute,
result: append(promql.Vector{}, result[2:]...),
},
{
time: 10 * time.Minute,
result: append(promql.Vector{}, result[2:3]...),
},
{
time: 15 * time.Minute,
result: nil,
},
{
time: 20 * time.Minute,
result: nil,
},
{
time: 25 * time.Minute,
result: append(promql.Vector{}, result[:1]...),
persistThisTime: true,
},
{
time: 30 * time.Minute,
result: append(promql.Vector{}, result[2:3]...),
},
}
ng := testEngine(t)
var forState float64
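			// An ALERTS_FOR_STATE sample's value is the timestamp at which the alert
			// became active; forState tracks the value the expected samples should carry.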
for i, test := range tests {
t.Logf("case %d", i)
evalTime := baseTime.Add(test.time).Add(queryOffset)
if test.persistThisTime {
forState = float64(evalTime.Unix())
}
if test.result == nil {
forState = float64(value.StaleNaN)
}
res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS' samples.
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS_FOR_STATE" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
require.Equal(t, "ALERTS", smplName)
}
}
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime.Add(-queryOffset))
// Updating the expected 'for' state.
if test.result[i].F >= 0 {
test.result[i].F = forState
}
}
require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
})
prom_testutil.RequireEqual(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
})
}
}
// sortAlerts sorts `[]*Alert` w.r.t. the Labels.
func sortAlerts(items []*Alert) {
sort.Slice(items, func(i, j int) bool {
return labels.Compare(items[i].Labels, items[j].Labels) < 0 // Strict ordering: a less-func must not report equal elements as less.
})
}
func TestForStateRestore(t *testing.T) {
for _, queryOffset := range []time.Duration{0, time.Minute} {
t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
ng := testEngine(t)
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(ng, storage),
Appendable: storage,
Queryable: storage,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
NotifyFunc: func(context.Context, string, ...*Alert) {},
OutageTolerance: 30 * time.Minute,
ForGracePeriod: 10 * time.Minute,
}
alertForDuration := 25 * time.Minute
// Initial run before Prometheus goes down.
rule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
alertForDuration,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
group := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
})
groups := make(map[string]*Group)
groups["default;"] = group
initialRuns := []time.Duration{0, 5 * time.Minute}
baseTime := time.Unix(0, 0)
for _, duration := range initialRuns {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
// Prometheus goes down here. We create new rules and groups.
type testInput struct {
name string
restoreDuration time.Duration
expectedAlerts []*Alert
num int
noRestore bool
gracePeriod bool
downDuration time.Duration
before func()
}
tests := []testInput{
{
name: "normal restore (alerts were not firing)",
restoreDuration: 15 * time.Minute,
expectedAlerts: rule.ActiveAlerts(),
downDuration: 10 * time.Minute,
},
{
name: "outage tolerance",
restoreDuration: 40 * time.Minute,
noRestore: true,
num: 2,
},
{
name: "no active alerts",
restoreDuration: 50 * time.Minute,
expectedAlerts: []*Alert{},
},
{
name: "test the grace period",
restoreDuration: 25 * time.Minute,
expectedAlerts: []*Alert{},
gracePeriod: true,
before: func() {
for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
},
num: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.before != nil {
tt.before()
}
newRule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
alertForDuration,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
)
newGroup := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{newRule},
ShouldRestore: true,
Opts: opts,
QueryOffset: &queryOffset,
})
newGroups := make(map[string]*Group)
newGroups["default;"] = newGroup
restoreTime := baseTime.Add(tt.restoreDuration).Add(queryOffset)
// First eval before restoration.
newGroup.Eval(context.TODO(), restoreTime)
// Restore happens here.
newGroup.RestoreForState(restoreTime)
got := newRule.ActiveAlerts()
for _, aa := range got {
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
})
// In all cases, we expect the restoration process to have completed.
require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed")
// Checking if we have restored it correctly.
switch {
case tt.noRestore:
require.Len(t, got, tt.num)
for _, e := range got {
require.Equal(t, e.ActiveAt, restoreTime)
}
case tt.gracePeriod:
require.Len(t, got, tt.num)
for _, e := range got {
require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
}
default:
exp := tt.expectedAlerts
require.Len(t, got, len(exp))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {
require.Equal(t, e.Labels, got[i].Labels)
// ActiveAt is compared at whole-second precision (via Unix()), which
// absorbs any ns/ms and float64/int64 conversion noise, so the
// difference must be exactly zero.
activeAtDiff := queryOffset.Seconds() + float64(e.ActiveAt.Unix()+int64(tt.downDuration/time.Second)-got[i].ActiveAt.Unix())
require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
}
}
})
}
})
}
}
func TestStaleness(t *testing.T) {
for _, queryOffset := range []time.Duration{0, time.Minute} {
st := teststorage.New(t)
defer st.Close()
engineOpts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, engineOpts)
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(engine, st),
Appendable: st,
Queryable: st,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
}
expr, err := parser.ParseExpr("a + 1")
require.NoError(t, err)
rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
group := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
QueryOffset: &queryOffset,
})
// A time series that has two samples and then goes stale.
app := st.Appender(context.Background())
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
err = app.Commit()
require.NoError(t, err)
ctx := context.Background()
// Execute 3 times, 1 second apart.
group.Eval(ctx, time.Unix(0, 0).Add(queryOffset))
group.Eval(ctx, time.Unix(1, 0).Add(queryOffset))
group.Eval(ctx, time.Unix(2, 0).Add(queryOffset))
querier, err := st.Querier(0, 2000)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
require.NoError(t, err)
set := querier.Select(ctx, false, nil, matcher)
samples, err := readSeriesSet(set)
require.NoError(t, err)
metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
metricSample, ok := samples[metric]
require.True(t, ok, "Series %s not returned.", metric)
require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended third sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F))
metricSample[2].F = 42 // require.Equal cannot handle NaN.
want := map[string][]promql.FPoint{
metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}},
}
require.Equal(t, want, samples)
}
}
// Convert a SeriesSet into a form usable with require.Equal.
func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.FPoint, error) {
result := map[string][]promql.FPoint{}
var it chunkenc.Iterator
for ss.Next() {
series := ss.At()
points := []promql.FPoint{}
it = series.Iterator(it) // Assign (not shadow) so the iterator is actually reused across series.
for it.Next() == chunkenc.ValFloat {
t, v := it.At()
points = append(points, promql.FPoint{T: t, F: v})
}
name := series.Labels().String()
result[name] = points
}
return result, ss.Err()
}
func TestGroup_QueryOffset(t *testing.T) {
config := `
groups:
- name: group1
query_offset: 2m
- name: group2
query_offset: 0s
- name: group3
`
dir := t.TempDir()
fname := path.Join(dir, "rules.yaml")
err := os.WriteFile(fname, []byte(config), fs.ModePerm)
require.NoError(t, err)
m := NewManager(&ManagerOptions{
Logger: promslog.NewNopLogger(),
DefaultRuleQueryOffset: func() time.Duration {
return time.Minute
},
})
m.start()
err = m.Update(time.Second, []string{fname}, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
rgs := m.RuleGroups()
sort.Slice(rgs, func(i, j int) bool {
return rgs[i].Name() < rgs[j].Name()
})
// From config.
require.Equal(t, 2*time.Minute, rgs[0].QueryOffset())
// Setting 0 in config is detected.
require.Equal(t, time.Duration(0), rgs[1].QueryOffset())
// Default when nothing is set.
require.Equal(t, time.Minute, rgs[2].QueryOffset())
m.Stop()
}
func TestCopyState(t *testing.T) {
oldGroup := &Group{
rules: []Rule{
NewAlertingRule("alert", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
NewRecordingRule("rule1", nil, labels.EmptyLabels()),
NewRecordingRule("rule2", nil, labels.EmptyLabels()),
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v1")),
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v2")),
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v3")),
NewAlertingRule("alert2", nil, 0, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
},
seriesInPreviousEval: []map[string]labels.Labels{
{},
{},
{},
{"r3a": labels.FromStrings("l1", "v1")},
{"r3b": labels.FromStrings("l1", "v2")},
{"r3c": labels.FromStrings("l1", "v3")},
{"a2": labels.FromStrings("l2", "v1")},
},
evaluationTime: time.Second,
}
oldGroup.rules[0].(*AlertingRule).active[42] = nil
newGroup := &Group{
rules: []Rule{
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v0")),
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v1")),
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v2")),
NewAlertingRule("alert", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
NewRecordingRule("rule1", nil, labels.EmptyLabels()),
NewAlertingRule("alert2", nil, 0, 0, labels.FromStrings("l2", "v0"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
NewAlertingRule("alert2", nil, 0, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
NewRecordingRule("rule4", nil, labels.EmptyLabels()),
},
seriesInPreviousEval: make([]map[string]labels.Labels, 8),
}
newGroup.CopyState(oldGroup)
want := []map[string]labels.Labels{
nil,
{"r3a": labels.FromStrings("l1", "v1")},
{"r3b": labels.FromStrings("l1", "v2")},
{},
{},
nil,
{"a2": labels.FromStrings("l2", "v1")},
nil,
}
require.Equal(t, want, newGroup.seriesInPreviousEval)
require.Equal(t, oldGroup.rules[0], newGroup.rules[3])
require.Equal(t, oldGroup.evaluationTime, newGroup.evaluationTime)
require.Equal(t, oldGroup.lastEvaluation, newGroup.lastEvaluation)
require.Equal(t, []labels.Labels{labels.FromStrings("l1", "v3")}, newGroup.staleSeries)
}
func TestDeletedRuleMarkedStale(t *testing.T) {
st := teststorage.New(t)
defer st.Close()
oldGroup := &Group{
rules: []Rule{
NewRecordingRule("rule1", nil, labels.FromStrings("l1", "v1")),
},
seriesInPreviousEval: []map[string]labels.Labels{
{"r1": labels.FromStrings("l1", "v1")},
},
}
newGroup := &Group{
rules: []Rule{},
seriesInPreviousEval: []map[string]labels.Labels{},
opts: &ManagerOptions{
Appendable: st,
RuleConcurrencyController: sequentialRuleEvalController{},
},
metrics: NewGroupMetrics(nil),
}
newGroup.CopyState(oldGroup)
newGroup.Eval(context.Background(), time.Unix(0, 0))
querier, err := st.Querier(0, 2000)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, "l1", "v1")
require.NoError(t, err)
set := querier.Select(context.Background(), false, nil, matcher)
samples, err := readSeriesSet(set)
require.NoError(t, err)
metric := labels.FromStrings("l1", "v1").String()
metricSample, ok := samples[metric]
require.True(t, ok, "Series %s not returned.", metric)
require.True(t, value.IsStaleNaN(metricSample[0].F), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].F))
}
func TestUpdate(t *testing.T) {
files := []string{"fixtures/rules.yaml"}
expected := map[string]labels.Labels{
"test": labels.FromStrings("name", "value"),
}
st := teststorage.New(t)
defer st.Close()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{
Appendable: st,
Queryable: st,
QueryFunc: EngineQueryFunc(engine, st),
Context: context.Background(),
Logger: promslog.NewNopLogger(),
})
ruleManager.start()
defer ruleManager.Stop()
err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
require.NotEmpty(t, ruleManager.groups, "expected non-empty rule groups")
ogs := map[string]*Group{}
for h, g := range ruleManager.groups {
g.seriesInPreviousEval = []map[string]labels.Labels{
expected,
}
ogs[h] = g
}
err = ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
for h, g := range ruleManager.groups {
for _, actual := range g.seriesInPreviousEval {
require.Equal(t, expected, actual)
}
// Groups are unchanged because there were no updates.
require.Equal(t, ogs[h], g)
}
// Groups will be recreated if updated.
rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml", false, model.UTF8Validation)
require.Empty(t, errs, "file parsing failures")
tmpFile, err := os.CreateTemp("", "rules.test.*.yaml")
require.NoError(t, err)
defer os.Remove(tmpFile.Name())
defer tmpFile.Close()
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
maps.Copy(ogs, ruleManager.groups)
// Update interval and reload.
for i, g := range rgs.Groups {
if g.Interval != 0 {
rgs.Groups[i].Interval = g.Interval * 2
} else {
rgs.Groups[i].Interval = model.Duration(10)
}
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
// Update limit and reload.
for i := range rgs.Groups {
rgs.Groups[i].Limit = 1
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
// Change group rules and reload.
for i, g := range rgs.Groups {
for j, r := range g.Rules {
rgs.Groups[i].Rules[j].Expr = fmt.Sprintf("%s * 0", r.Expr)
}
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
}
// ruleGroupsTest is a testing struct for running tests over rules.
type ruleGroupsTest struct {
Groups []ruleGroupTest `yaml:"groups"`
}
// ruleGroupTest forms a testing struct for running tests over rules.
type ruleGroupTest struct {
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []rulefmt.Rule `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
}
func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
grps := r.Groups
tmp := []ruleGroupTest{}
for _, g := range grps {
rtmp := []rulefmt.Rule{}
for _, r := range g.Rules {
rtmp = append(rtmp, rulefmt.Rule{
Record: r.Record,
Alert: r.Alert,
Expr: r.Expr,
For: r.For,
Labels: r.Labels,
Annotations: r.Annotations,
})
}
tmp = append(tmp, ruleGroupTest{
Name: g.Name,
Interval: g.Interval,
Limit: g.Limit,
Rules: rtmp,
Labels: g.Labels,
})
}
return ruleGroupsTest{
Groups: tmp,
}
}
func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, ogs map[string]*Group) {
bs, err := yaml.Marshal(formatRules(rgs))
require.NoError(t, err)
_, err = tmpFile.Seek(0, 0)
require.NoError(t, err)
_, err = tmpFile.Write(bs)
require.NoError(t, err)
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
for h, g := range ruleManager.groups {
if ogs[h] == g {
t.Fail()
}
ogs[h] = g
}
}
func TestNotify(t *testing.T) {
storage := teststorage.New(t)
defer storage.Close()
engineOpts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, engineOpts)
var lastNotified []*Alert
notifyFunc := func(_ context.Context, _ string, alerts ...*Alert) {
lastNotified = alerts
}
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(engine, storage),
Appendable: storage,
Queryable: storage,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
NotifyFunc: notifyFunc,
ResendDelay: 2 * time.Second,
}
expr, err := parser.ParseExpr("a > 1")
require.NoError(t, err)
rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
group := NewGroup(GroupOptions{
Name: "alert",
Interval: time.Second,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
})
app := storage.Appender(context.Background())
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, 3)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 5000, 3)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 6000, 0)
err = app.Commit()
require.NoError(t, err)
ctx := context.Background()
// Alert sent right away
group.Eval(ctx, time.Unix(1, 0))
require.Len(t, lastNotified, 1)
require.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero")
// Alert is not sent 1s later
group.Eval(ctx, time.Unix(2, 0))
require.Empty(t, lastNotified)
// Alert is resent at t=5s
group.Eval(ctx, time.Unix(5, 0))
require.Len(t, lastNotified, 1)
// Resolution alert sent right away
group.Eval(ctx, time.Unix(6, 0))
require.Len(t, lastNotified, 1)
}
func TestMetricsUpdate(t *testing.T) {
files := []string{"fixtures/rules.yaml", "fixtures/rules2.yaml"}
metricNames := []string{
"prometheus_rule_evaluations_total",
"prometheus_rule_evaluation_failures_total",
"prometheus_rule_group_interval_seconds",
"prometheus_rule_group_last_duration_seconds",
"prometheus_rule_group_last_evaluation_timestamp_seconds",
"prometheus_rule_group_rules",
}
storage := teststorage.New(t)
defer storage.Close()
registry := prometheus.NewRegistry()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{
Appendable: storage,
Queryable: storage,
QueryFunc: EngineQueryFunc(engine, storage),
Context: context.Background(),
Logger: promslog.NewNopLogger(),
Registerer: registry,
})
ruleManager.start()
defer ruleManager.Stop()
countMetrics := func() int {
ms, err := registry.Gather()
require.NoError(t, err)
var metrics int
for _, m := range ms {
s := m.GetName()
if slices.Contains(metricNames, s) {
metrics += len(m.Metric)
}
}
return metrics
}
cases := []struct {
files []string
metrics int
}{
{
files: files,
metrics: 12,
},
{
files: files[:1],
metrics: 6,
},
{
files: files[:0],
metrics: 0,
},
{
files: files[1:],
metrics: 6,
},
}
for i, c := range cases {
err := ruleManager.Update(time.Second, c.files, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
time.Sleep(2 * time.Second)
require.Equal(t, c.metrics, countMetrics(), "test %d: invalid count of metrics", i)
}
}
func TestGroupStalenessOnRemoval(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
files := []string{"fixtures/rules2.yaml"}
sameFiles := []string{"fixtures/rules2_copy.yaml"}
storage := teststorage.New(t)
defer storage.Close()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{
Appendable: storage,
Queryable: storage,
QueryFunc: EngineQueryFunc(engine, storage),
Context: context.Background(),
Logger: promslog.NewNopLogger(),
})
var stopped bool
ruleManager.start()
defer func() {
if !stopped {
ruleManager.Stop()
}
}()
cases := []struct {
files []string
staleNaN int
}{
{
files: files,
staleNaN: 0,
},
{
// When we remove the files, it should produce a staleness marker.
files: files[:0],
staleNaN: 1,
},
{
// Rules that produce the same metrics but live in a different file
// should not produce a staleness marker.
files: sameFiles,
staleNaN: 0,
},
{
// Staleness marker should be present as we don't have any rules
// loaded anymore.
files: files[:0],
staleNaN: 1,
},
{
// Add rules back so we have rules loaded when we stop the manager
// and check for the absence of staleness markers.
files: sameFiles,
staleNaN: 0,
},
}
var totalStaleNaN int
for i, c := range cases {
err := ruleManager.Update(time.Second, c.files, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
time.Sleep(3 * time.Second)
totalStaleNaN += c.staleNaN
require.Equal(t, totalStaleNaN, countStaleNaN(t, storage), "test %d/%q: invalid count of staleness markers", i, c.files)
}
ruleManager.Stop()
stopped = true
require.Equal(t, totalStaleNaN, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine")
}
func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
files := []string{"fixtures/rules2.yaml"}
storage := teststorage.New(t)
defer storage.Close()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{
Appendable: storage,
Queryable: storage,
QueryFunc: EngineQueryFunc(engine, storage),
Context: context.Background(),
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/group.go | rules/group.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"errors"
"log/slog"
"maps"
"math"
"slices"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
// Group is a set of rules that have a logical relation.
type Group struct {
name string
file string
interval time.Duration
queryOffset *time.Duration
limit int
rules []Rule
seriesInPreviousEval []map[string]labels.Labels // One per Rule.
staleSeries []labels.Labels
opts *ManagerOptions
mtx sync.Mutex
evaluationTime time.Duration // Time it took to evaluate the group.
evaluationRuleTimeSum time.Duration // Sum of time it took to evaluate each rule in the group.
lastEvaluation time.Time // Wall-clock time of most recent evaluation.
lastEvalTimestamp time.Time // Time slot used for most recent evaluation.
shouldRestore bool
markStale bool
done chan struct{}
terminated chan struct{}
managerDone chan struct{}
logger *slog.Logger
metrics *Metrics
// Rule group evaluation iteration function,
// defaults to DefaultEvalIterationFunc.
evalIterationFunc GroupEvalIterationFunc
appOpts *storage.AppendOptions
}
// GroupEvalIterationFunc is used to implement and extend rule group
// evaluation iteration logic. It is configured in Group.evalIterationFunc,
// and periodically invoked at each group evaluation interval to
// evaluate the rules in the group at that point in time.
// DefaultEvalIterationFunc is the default implementation.
type GroupEvalIterationFunc func(ctx context.Context, g *Group, evalTimestamp time.Time)
type GroupOptions struct {
Name, File string
Interval time.Duration
Limit int
Rules []Rule
ShouldRestore bool
Opts *ManagerOptions
QueryOffset *time.Duration
done chan struct{}
EvalIterationFunc GroupEvalIterationFunc
}
// NewGroup makes a new Group with the given name, options, and rules.
func NewGroup(o GroupOptions) *Group {
opts := o.Opts
if opts == nil {
opts = &ManagerOptions{}
}
metrics := opts.Metrics
if metrics == nil {
metrics = NewGroupMetrics(opts.Registerer)
}
key := GroupKey(o.File, o.Name)
metrics.IterationsMissed.WithLabelValues(key)
metrics.IterationsScheduled.WithLabelValues(key)
metrics.EvalTotal.WithLabelValues(key)
metrics.EvalFailures.WithLabelValues(key)
metrics.GroupLastEvalTime.WithLabelValues(key)
metrics.GroupLastDuration.WithLabelValues(key)
metrics.GroupLastRuleDurationSum.WithLabelValues(key)
metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
metrics.GroupSamples.WithLabelValues(key)
metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
evalIterationFunc := o.EvalIterationFunc
if evalIterationFunc == nil {
evalIterationFunc = DefaultEvalIterationFunc
}
if opts.Logger == nil {
opts.Logger = promslog.NewNopLogger()
}
return &Group{
name: o.Name,
file: o.File,
interval: o.Interval,
queryOffset: o.QueryOffset,
limit: o.Limit,
rules: o.Rules,
shouldRestore: o.ShouldRestore,
opts: opts,
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
done: make(chan struct{}),
managerDone: o.done,
terminated: make(chan struct{}),
logger: opts.Logger.With("file", o.File, "group", o.Name),
metrics: metrics,
evalIterationFunc: evalIterationFunc,
appOpts: &storage.AppendOptions{DiscardOutOfOrder: true},
}
}
// Name returns the group name.
func (g *Group) Name() string { return g.name }
// File returns the group's file.
func (g *Group) File() string { return g.file }
// Rules returns the group's rules.
func (g *Group) Rules(matcherSets ...[]*labels.Matcher) []Rule {
if len(matcherSets) == 0 {
return g.rules
}
var rules []Rule
for _, rule := range g.rules {
if matchesMatcherSets(matcherSets, rule.Labels()) {
rules = append(rules, rule)
}
}
return rules
}
func matches(lbls labels.Labels, matchers ...*labels.Matcher) bool {
for _, m := range matchers {
if v := lbls.Get(m.Name); !m.Matches(v) {
return false
}
}
return true
}
// matchesMatcherSets ensures all matches in each matcher set are ANDed and the set of those is ORed.
func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) bool {
if len(matcherSets) == 0 {
return true
}
var ok bool
for _, matchers := range matcherSets {
if matches(lbls, matchers...) {
ok = true
}
}
return ok
}
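// exampleMatcherSets is an illustrative sketch, not part of the upstream
// file: it demonstrates the AND/OR semantics documented above with two
// hypothetical matchers.
func exampleMatcherSets() (andResult, orResult bool) {
m1 := labels.MustNewMatcher(labels.MatchEqual, "job", "api")
m2 := labels.MustNewMatcher(labels.MatchEqual, "env", "prod")
lbls := labels.FromStrings("job", "api", "env", "dev")
// Matchers within one set are ANDed: m2 fails on env="dev", so the set fails.
andResult = matchesMatcherSets([][]*labels.Matcher{{m1, m2}}, lbls) // false
// Separate sets are ORed: the set {m1} matches on its own.
orResult = matchesMatcherSets([][]*labels.Matcher{{m1}, {m2}}, lbls) // true
return andResult, orResult
}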
// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
// Context returns the group's context.
func (g *Group) Context() context.Context { return g.opts.Context }
// Interval returns the group's interval.
func (g *Group) Interval() time.Duration { return g.interval }
// Limit returns the group's limit.
func (g *Group) Limit() int { return g.limit }
func (g *Group) Logger() *slog.Logger { return g.logger }
func (g *Group) run(ctx context.Context) {
defer close(g.terminated)
// Wait an initial amount to have consistently slotted intervals.
evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.interval)
select {
case <-time.After(time.Until(evalTimestamp)):
case <-g.done:
return
}
ctx = promql.NewOriginContext(ctx, map[string]any{
"ruleGroup": map[string]string{
"file": g.File(),
"name": g.Name(),
},
})
// The assumption here is that since the ticker was started after having
// waited for `evalTimestamp` to pass, the ticks will trigger soon
// after each `evalTimestamp + N * g.interval` occurrence.
tick := time.NewTicker(g.interval)
defer tick.Stop()
defer func() {
if !g.markStale {
return
}
go func(now time.Time) {
for _, rule := range g.seriesInPreviousEval {
for _, r := range rule {
g.staleSeries = append(g.staleSeries, r)
}
}
// seriesInPreviousEval can be garbage collected at this point.
g.seriesInPreviousEval = nil
// Wait for 2 intervals to give renamed rules the opportunity
// to insert new series into the TSDB. At this point, if a rule
// was renamed, the new rule should already have started.
select {
case <-g.managerDone:
case <-time.After(2 * g.interval):
g.cleanupStaleSeries(ctx, now)
}
}(time.Now())
}()
g.evalIterationFunc(ctx, g, evalTimestamp)
if g.shouldRestore {
// If we have to restore, we wait for another Eval to finish.
// The reason is that during the first eval (or before it) we might
// not have scraped enough data yet, so recording rules would not
// have updated the latest values on which some alerts depend.
select {
case <-g.done:
return
case <-tick.C:
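// Worked example (hypothetical numbers): with a 1m interval, if 3m30s
// have elapsed since evalTimestamp when this tick fires, then
// 3m30s / 1m = 3 (integer duration division), so missed = 3 - 1 = 2
// iterations were skipped and evalTimestamp advances by (2 + 1) * 1m = 3m.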
missed := (time.Since(evalTimestamp) / g.interval) - 1
if missed > 0 {
g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
}
evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
g.evalIterationFunc(ctx, g, evalTimestamp)
}
g.RestoreForState(time.Now())
}
for {
select {
case <-g.done:
return
default:
select {
case <-g.done:
return
case <-tick.C:
missed := (time.Since(evalTimestamp) / g.interval) - 1
if missed > 0 {
g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
}
evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
g.evalIterationFunc(ctx, g, evalTimestamp)
}
}
}
}
func (g *Group) stopAsync() {
close(g.done)
}
func (g *Group) waitStopped() {
<-g.terminated
}
func (g *Group) stop() {
g.stopAsync()
g.waitStopped()
}
func (g *Group) hash() uint64 {
l := labels.New(
labels.Label{Name: "name", Value: g.name},
labels.Label{Name: "file", Value: g.file},
)
return l.Hash()
}
// AlertingRules returns the list of the group's alerting rules.
func (g *Group) AlertingRules() []*AlertingRule {
g.mtx.Lock()
defer g.mtx.Unlock()
var alerts []*AlertingRule
for _, rule := range g.rules {
if alertingRule, ok := rule.(*AlertingRule); ok {
alerts = append(alerts, alertingRule)
}
}
slices.SortFunc(alerts, func(a, b *AlertingRule) int {
if a.State() == b.State() {
return strings.Compare(a.Name(), b.Name())
}
return int(b.State() - a.State())
})
return alerts
}
// HasAlertingRules returns true if the group contains at least one AlertingRule.
func (g *Group) HasAlertingRules() bool {
g.mtx.Lock()
defer g.mtx.Unlock()
for _, rule := range g.rules {
if _, ok := rule.(*AlertingRule); ok {
return true
}
}
return false
}
// GetEvaluationTime returns the time in seconds it took to evaluate the rule group.
func (g *Group) GetEvaluationTime() time.Duration {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.evaluationTime
}
// setEvaluationTime sets the time in seconds the last evaluation took.
func (g *Group) setEvaluationTime(dur time.Duration) {
g.metrics.GroupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds())
g.mtx.Lock()
defer g.mtx.Unlock()
g.evaluationTime = dur
}
// GetRuleEvaluationTimeSum returns the sum of the time it took to evaluate each rule in the group irrespective of concurrency.
func (g *Group) GetRuleEvaluationTimeSum() time.Duration {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.evaluationRuleTimeSum
}
// updateRuleEvaluationTimeSum updates evaluationRuleTimeSum which is the sum of the time it took to evaluate each rule in the group irrespective of concurrency.
// It collects the times from the rules themselves.
func (g *Group) updateRuleEvaluationTimeSum() {
var sum time.Duration
for _, rule := range g.rules {
sum += rule.GetEvaluationDuration()
}
g.metrics.GroupLastRuleDurationSum.WithLabelValues(GroupKey(g.file, g.name)).Set(sum.Seconds())
g.mtx.Lock()
defer g.mtx.Unlock()
g.evaluationRuleTimeSum = sum
}
// GetLastEvaluation returns the time the last evaluation of the rule group took place.
func (g *Group) GetLastEvaluation() time.Time {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.lastEvaluation
}
// setLastEvaluation updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
func (g *Group) setLastEvaluation(ts time.Time) {
g.metrics.GroupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)
g.mtx.Lock()
defer g.mtx.Unlock()
g.lastEvaluation = ts
}
// GetLastEvalTimestamp returns the timestamp of the last evaluation.
func (g *Group) GetLastEvalTimestamp() time.Time {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.lastEvalTimestamp
}
// setLastEvalTimestamp updates lastEvalTimestamp to the timestamp of the last evaluation.
func (g *Group) setLastEvalTimestamp(ts time.Time) {
g.mtx.Lock()
defer g.mtx.Unlock()
g.lastEvalTimestamp = ts
}
// EvalTimestamp returns the immediately preceding consistently slotted evaluation time.
func (g *Group) EvalTimestamp(startTime int64) time.Time {
var (
offset = int64(g.hash() % uint64(g.interval))
// This group's evaluation times differ from the perfect time intervals by `offset` nanoseconds.
// But we can only use `% interval` to align with the interval. And `% interval` will always
// align with the perfect time intervals, instead of this group's. Because of this we add
// `offset` _after_ aligning with the perfect time interval.
//
// There can be cases where adding `offset` to the perfect evaluation time can yield a
// timestamp in the future, which is not what EvalTimestamp should do.
// So we subtract one `offset` to make sure that `now - (now % interval) + offset` gives an
// evaluation time in the past.
adjNow = startTime - offset
// Adjust to perfect evaluation intervals.
base = adjNow - (adjNow % int64(g.interval))
// Add one offset to randomize the evaluation times of this group.
next = base + offset
)
return time.Unix(0, next).UTC()
}
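// exampleEvalTimestamp is an illustrative sketch, not part of the upstream
// file: it walks the slotting arithmetic above with hypothetical numbers,
// a 1m interval and a hash-derived offset of 10s.
func exampleEvalTimestamp() time.Time {
interval := int64(time.Minute)
offset := int64(10 * time.Second)
startTime := time.Unix(125, 0).UnixNano() // "now" is 2m5s after the epoch.
adjNow := startTime - offset // 1m55s.
base := adjNow - (adjNow % interval) // 1m, the preceding perfect slot.
next := base + offset // 1m10s: slotted, offset, and in the past.
return time.Unix(0, next).UTC()
}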
func nameAndLabels(rule Rule) string {
return rule.Name() + rule.Labels().String()
}
// CopyState copies the alerting rule and staleness related state from the given group.
//
// Rules are matched based on their name and labels. If there are duplicates, the
// first is matched with the first, second with the second etc.
func (g *Group) CopyState(from *Group) {
g.evaluationTime = from.evaluationTime
g.lastEvaluation = from.lastEvaluation
ruleMap := make(map[string][]int, len(from.rules))
for fi, fromRule := range from.rules {
nameAndLabels := nameAndLabels(fromRule)
l := ruleMap[nameAndLabels]
ruleMap[nameAndLabels] = append(l, fi)
}
for i, rule := range g.rules {
nameAndLabels := nameAndLabels(rule)
indexes := ruleMap[nameAndLabels]
if len(indexes) == 0 {
continue
}
fi := indexes[0]
g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
ruleMap[nameAndLabels] = indexes[1:]
ar, ok := rule.(*AlertingRule)
if !ok {
continue
}
far, ok := from.rules[fi].(*AlertingRule)
if !ok {
continue
}
maps.Copy(ar.active, far.active)
}
// Handle deleted and unmatched duplicate rules.
g.staleSeries = from.staleSeries
for fi, fromRule := range from.rules {
nameAndLabels := nameAndLabels(fromRule)
l := ruleMap[nameAndLabels]
if len(l) != 0 {
for _, series := range from.seriesInPreviousEval[fi] {
g.staleSeries = append(g.staleSeries, series)
}
}
}
}
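// Illustrative example (not from the upstream file): if both the old and
// the new group contain two rules named "rule3" with labels {l1="v1"},
// the first old rule is paired with the first new one and the second with
// the second; any old rule left unmatched has its previous series appended
// to staleSeries so they are marked stale on the next eval.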
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
// Rules can be evaluated concurrently if the `concurrent-rule-eval` feature flag is enabled.
func (g *Group) Eval(ctx context.Context, ts time.Time) {
var (
samplesTotal atomic.Float64
ruleQueryOffset = g.QueryOffset()
)
eval := func(i int, rule Rule, cleanup func()) {
if cleanup != nil {
defer cleanup()
}
logger := g.logger.With("name", rule.Name(), "index", i)
ctx, sp := otel.Tracer("").Start(ctx, "rule")
sp.SetAttributes(attribute.String("name", rule.Name()))
defer func(t time.Time) {
sp.End()
since := time.Since(t)
g.metrics.EvalDuration.Observe(since.Seconds())
g.metrics.EvalDurationHistogram.Observe(since.Seconds())
rule.SetEvaluationDuration(since)
rule.SetEvaluationTimestamp(t)
}(time.Now())
if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() {
logger = logger.With("trace_id", sp.SpanContext().TraceID())
}
g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
if err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
var eqc promql.ErrQueryCanceled
if !errors.As(err, &eqc) {
logger.Warn("Evaluating rule failed", "rule", rule, "err", err)
}
return
}
rule.SetHealth(HealthGood)
rule.SetLastError(nil)
samplesTotal.Add(float64(len(vector)))
if ar, ok := rule.(*AlertingRule); ok {
ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
}
var (
numOutOfOrder = 0
numTooOld = 0
numDuplicates = 0
)
app := g.opts.Appendable.Appender(ctx)
seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
defer func() {
if err := app.Commit(); err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
logger.Warn("Rule sample appending failed", "err", err)
return
}
g.seriesInPreviousEval[i] = seriesReturned
}()
for _, s := range vector {
if s.H != nil {
_, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
} else {
app.SetOptions(g.appOpts)
_, err = app.Append(0, s.Metric, s.T, s.F)
}
if err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
numOutOfOrder++
logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrTooOldSample):
numTooOld++
logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
numDuplicates++
logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
default:
logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
}
} else {
buf := [1024]byte{}
seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric
}
}
if numOutOfOrder > 0 {
logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder)
}
if numTooOld > 0 {
logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld)
}
if numDuplicates > 0 {
logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates)
}
for metric, lset := range g.seriesInPreviousEval[i] {
if _, ok := seriesReturned[metric]; !ok {
// Series no longer exposed, mark it stale.
_, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case unwrappedErr == nil:
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
errors.Is(unwrappedErr, storage.ErrTooOldSample),
errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
// Do not count these in logging, as this is expected if the series
// is exposed by a different rule.
default:
logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err)
}
}
}
}
var wg sync.WaitGroup
ctrl := g.opts.RuleConcurrencyController
if ctrl == nil {
ctrl = sequentialRuleEvalController{}
}
batches := ctrl.SplitGroupIntoBatches(ctx, g)
if len(batches) == 0 {
// Sequential evaluation when batches aren't set.
// This is the behaviour without a defined RuleConcurrencyController.
for i, rule := range g.rules {
// Check if the group has been stopped.
select {
case <-g.done:
return
default:
}
eval(i, rule, nil)
}
} else {
// Concurrent evaluation.
for _, batch := range batches {
for _, ruleIndex := range batch {
// Check if the group has been stopped.
select {
case <-g.done:
wg.Wait()
return
default:
}
rule := g.rules[ruleIndex]
if len(batch) > 1 && ctrl.Allow(ctx, g, rule) {
wg.Add(1)
go eval(ruleIndex, rule, func() {
wg.Done()
ctrl.Done(ctx)
})
} else {
eval(ruleIndex, rule, nil)
}
}
// It is important that we finish processing all rules in the current batch before we move on to the next one.
wg.Wait()
}
}
g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load())
g.cleanupStaleSeries(ctx, ts)
}
func (g *Group) QueryOffset() time.Duration {
if g.queryOffset != nil {
return *g.queryOffset
}
if g.opts.DefaultRuleQueryOffset != nil {
return g.opts.DefaultRuleQueryOffset()
}
return time.Duration(0)
}
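// exampleQueryOffset is an illustrative sketch, not part of the upstream
// file: a group-level offset, when present, wins over the manager default.
func exampleQueryOffset() time.Duration {
perGroup := 2 * time.Minute
g := &Group{
queryOffset: &perGroup,
opts: &ManagerOptions{
DefaultRuleQueryOffset: func() time.Duration { return time.Minute },
},
}
return g.QueryOffset() // 2m: the group-level offset takes precedence.
}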
func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
if len(g.staleSeries) == 0 {
return
}
app := g.opts.Appendable.Appender(ctx)
app.SetOptions(g.appOpts)
queryOffset := g.QueryOffset()
for _, s := range g.staleSeries {
// Rule that produced series no longer configured, mark it stale.
_, err := app.Append(0, s, timestamp.FromTime(ts.Add(-queryOffset)), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case unwrappedErr == nil:
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
errors.Is(unwrappedErr, storage.ErrTooOldSample),
errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
// Do not count these in logging, as this is expected if the series
// is exposed by a different rule.
default:
g.logger.Warn("Adding stale sample for previous configuration failed", "sample", s, "err", err)
}
}
if err := app.Commit(); err != nil {
g.logger.Warn("Stale sample appending for previous configuration failed", "err", err)
} else {
g.staleSeries = nil
}
}
// RestoreForState restores the 'for' state of the alerts
// by looking up last ActiveAt from storage.
func (g *Group) RestoreForState(ts time.Time) {
defer func() {
totalRestoreTimeSeconds := time.Since(ts).Seconds()
g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds)
g.logger.Debug("'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds)
g.shouldRestore = false
}()
maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
// We allow restoration only for alerts that were active within the outage tolerance window, i.e. no earlier than mint.
mint := ts.Add(-g.opts.OutageTolerance)
mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
q, err := g.opts.Queryable.Querier(mintMS, maxtMS)
if err != nil {
g.logger.Error("Failed to get Querier", "err", err)
return
}
defer func() {
if err := q.Close(); err != nil {
g.logger.Error("Failed to close Querier", "err", err)
}
}()
for _, rule := range g.Rules() {
alertRule, ok := rule.(*AlertingRule)
if !ok {
continue
}
alertHoldDuration := alertRule.HoldDuration()
if alertHoldDuration < g.opts.ForGracePeriod {
// If alertHoldDuration is already less than the grace period, we do not
// want to make the alert wait for `g.opts.ForGracePeriod` before firing.
// Hence we skip restoration, which makes it wait only for alertHoldDuration.
alertRule.SetRestored(true)
continue
}
sset, err := alertRule.QueryForStateSeries(g.opts.Context, q)
if err != nil {
g.logger.Error(
"Failed to restore 'for' state",
labels.AlertName, alertRule.Name(),
"stage", "Select",
"err", err,
)
// Even if we failed to query the `ALERT_FOR_STATE` series, we currently have no way to retry the restore process.
// So the best we can do is mark the rule as restored and let it eventually fire.
alertRule.SetRestored(true)
continue
}
// The active alert count is not exactly the number of series we will see, but it is as good an approximation as any.
seriesByLabels := make(map[string]storage.Series, alertRule.ActiveAlertsCount())
for sset.Next() {
seriesByLabels[sset.At().Labels().DropReserved(func(n string) bool { return n == labels.MetricName }).String()] = sset.At()
}
// No results for this alert rule.
if len(seriesByLabels) == 0 {
g.logger.Debug("No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name())
alertRule.SetRestored(true)
continue
}
alertRule.ForEachActiveAlert(func(a *Alert) {
var s storage.Series
s, ok := seriesByLabels[a.Labels.String()]
if !ok {
return
}
// Series found for the 'for' state.
var t int64
var v float64
it := s.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v = it.At()
}
if it.Err() != nil {
g.logger.Error("Failed to restore 'for' state",
labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
return
}
if value.IsStaleNaN(v) { // Alert was not active.
return
}
downAt := time.Unix(t/1000, 0).UTC()
restoredActiveAt := time.Unix(int64(v), 0).UTC()
timeSpentPending := downAt.Sub(restoredActiveAt)
timeRemainingPending := alertHoldDuration - timeSpentPending
switch {
case timeRemainingPending <= 0:
// It means that the alert was firing when Prometheus went down.
// In the next Eval, the state of this alert will be set back to
// firing again if it's still firing in that Eval.
// Nothing to be done in this case.
case timeRemainingPending < g.opts.ForGracePeriod:
// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
// /* new firing time */ /* moving back by hold duration */
//
// Proof of correctness:
// firingTime = restoredActiveAt.Add(alertHoldDuration)
// = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
// = ts + m.opts.ForGracePeriod
//
// Time remaining to fire = firingTime.Sub(ts)
// = (ts + m.opts.ForGracePeriod) - ts
// = m.opts.ForGracePeriod
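// Worked example (hypothetical numbers): with alertHoldDuration = 25m,
// ForGracePeriod = 10m and ts = 100m after the epoch, restoredActiveAt
// becomes 100m + 10m - 25m = 85m, so the alert fires at 85m + 25m = 110m,
// i.e. exactly ForGracePeriod after the restart.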
restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
default:
// By shifting ActiveAt to the future (ActiveAt + some_duration),
// the total pending time from the original ActiveAt
// would be `alertHoldDuration + some_duration`.
// Here, some_duration = downDuration.
downDuration := ts.Sub(downAt)
restoredActiveAt = restoredActiveAt.Add(downDuration)
}
a.ActiveAt = restoredActiveAt
g.logger.Debug("'for' state restored",
labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
"labels", a.Labels.String())
})
alertRule.SetRestored(true)
}
}
// Equals returns whether two groups are the same.
func (g *Group) Equals(ng *Group) bool {
if g.name != ng.name {
return false
}
if g.file != ng.file {
return false
}
if g.interval != ng.interval {
return false
}
if g.limit != ng.limit {
return false
}
if ((g.queryOffset == nil) != (ng.queryOffset == nil)) || (g.queryOffset != nil && ng.queryOffset != nil && *g.queryOffset != *ng.queryOffset) {
return false
}
if len(g.rules) != len(ng.rules) {
return false
}
for i, gr := range g.rules {
if gr.String() != ng.rules[i].String() {
return false
}
}
return true
}
// GroupKey returns a unique key for a rule group. Group names need not be unique across files, so the file name is included in the key.
func GroupKey(file, name string) string {
return file + ";" + name
}
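// For example, GroupKey("rules.yaml", "default") returns "rules.yaml;default",
// the form used as the rule_group label value for the group metrics below.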
// Constants for instrumentation.
const namespace = "prometheus"
// Metrics for rule evaluation.
type Metrics struct {
EvalDuration prometheus.Summary
EvalDurationHistogram prometheus.Histogram
IterationDuration prometheus.Summary
IterationDurationHistogram prometheus.Histogram
IterationsMissed *prometheus.CounterVec
IterationsScheduled *prometheus.CounterVec
EvalTotal *prometheus.CounterVec
EvalFailures *prometheus.CounterVec
GroupInterval *prometheus.GaugeVec
GroupLastEvalTime *prometheus.GaugeVec
GroupLastDuration *prometheus.GaugeVec
GroupLastRuleDurationSum *prometheus.GaugeVec
GroupLastRestoreDuration *prometheus.GaugeVec
GroupRules *prometheus.GaugeVec
GroupSamples *prometheus.GaugeVec
}
// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
// if not nil.
func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m := &Metrics{
EvalDuration: prometheus.NewSummary(
prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_evaluation_duration_seconds",
Help: "The duration for a rule to execute.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
EvalDurationHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "rule_evaluation_duration_histogram_seconds",
Help: "The duration for a rule to execute.",
Buckets: []float64{.01, .1, 1, 10},
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
}),
IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_group_duration_seconds",
Help: "The duration of rule group evaluations.",
Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
}),
IterationDurationHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "rule_group_duration_histogram_seconds",
Help: "The duration of rule group evaluations.",
Buckets: []float64{.01, .1, 1, 10},
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
}),
IterationsMissed: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_group_iterations_missed_total",
Help: "The total number of rule group evaluations missed due to slow rule group evaluation.",
},
[]string{"rule_group"},
),
IterationsScheduled: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_group_iterations_total",
Help: "The total number of scheduled rule group evaluations, whether executed or missed.",
},
[]string{"rule_group"},
),
EvalTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluations_total",
Help: "The total number of rule evaluations.",
},
[]string{"rule_group"},
),
EvalFailures: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluation_failures_total",
Help: "The total number of rule evaluation failures.",
},
[]string{"rule_group"},
),
GroupInterval: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_interval_seconds",
Help: "The interval of a rule group.",
},
[]string{"rule_group"},
),
GroupLastEvalTime: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_evaluation_timestamp_seconds",
Help: "The timestamp of the last rule group evaluation in seconds.",
},
[]string{"rule_group"},
),
GroupLastDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_duration_seconds",
Help: "The duration of the last rule group evaluation.",
},
[]string{"rule_group"},
),
GroupLastRuleDurationSum: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_rule_duration_sum_seconds",
Help: "The sum of time in seconds it took to evaluate each rule in the group regardless of concurrency. This should be higher than the group duration if rules are evaluated concurrently.",
},
[]string{"rule_group"},
),
GroupLastRestoreDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_restore_duration_seconds",
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/group_test.go | rules/group_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestGroup_Equals(t *testing.T) {
tests := map[string]struct {
first *Group
second *Group
expected bool
}{
"no query offset set on both groups": {
first: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
},
second: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
},
expected: true,
},
"query offset set only on the first group": {
first: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
queryOffset: pointerOf[time.Duration](time.Minute),
},
second: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
},
expected: false,
},
"query offset set on both groups to the same value": {
first: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
queryOffset: pointerOf[time.Duration](time.Minute),
},
second: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
queryOffset: pointerOf[time.Duration](time.Minute),
},
expected: true,
},
"query offset set on both groups to different value": {
first: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
queryOffset: pointerOf[time.Duration](time.Minute),
},
second: &Group{
name: "group-1",
file: "file-1",
interval: time.Minute,
queryOffset: pointerOf[time.Duration](2 * time.Minute),
},
expected: false,
},
}
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
require.Equal(t, testData.expected, testData.first.Equals(testData.second))
require.Equal(t, testData.expected, testData.second.Equals(testData.first))
})
}
}
func pointerOf[T any](value T) *T {
return &value
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/recording.go | rules/recording.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"errors"
"fmt"
"net/url"
"sync"
"time"
"go.uber.org/atomic"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)
// ErrDuplicateRecordingLabelSet is returned when a recording rule evaluation produces
// metrics with identical labelsets after applying rule labels.
var ErrDuplicateRecordingLabelSet = errors.New("vector contains metrics with the same labelset after applying rule labels")
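// Illustrative case (hypothetical series): a rule with labels {job="x"}
// evaluated over {__name__="errors_total", job="a"} and
// {__name__="errors_total", job="b"} rewrites both to the rule name with
// job="x", yielding two samples with identical labelsets and this error.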
// A RecordingRule records its vector expression into new timeseries.
type RecordingRule struct {
name string
vector parser.Expr
labels labels.Labels
// The health of the recording rule.
health *atomic.String
// Timestamp of last evaluation of the recording rule.
evaluationTimestamp *atomic.Time
// The last error seen by the recording rule.
lastError *atomic.Error
// How long it took to evaluate the recording rule.
evaluationDuration *atomic.Duration
dependenciesMutex sync.RWMutex
dependentRules []Rule
dependencyRules []Rule
}
// NewRecordingRule returns a new recording rule.
func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule {
return &RecordingRule{
name: name,
vector: vector,
labels: lset,
health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
}
}
// Name returns the rule name.
func (rule *RecordingRule) Name() string {
return rule.name
}
// Query returns the rule query expression.
func (rule *RecordingRule) Query() parser.Expr {
return rule.vector
}
// Labels returns the rule labels.
func (rule *RecordingRule) Labels() labels.Labels {
return rule.labels
}
// Eval evaluates the rule and then overrides the metric names and labels accordingly.
func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) {
ctx = NewOriginContext(ctx, NewRuleDetail(rule))
vector, err := query(ctx, rule.vector.String(), ts.Add(-queryOffset))
if err != nil {
return nil, err
}
// Override the metric name and labels.
lb := labels.NewBuilder(labels.EmptyLabels())
for i := range vector {
sample := &vector[i]
lb.Reset(sample.Metric)
lb.Set(labels.MetricName, rule.name)
rule.labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
})
sample.Metric = lb.Labels()
}
// Check that the rule does not produce identical metrics after applying
// labels.
if vector.ContainsSameLabelset() {
return nil, ErrDuplicateRecordingLabelSet
}
numSeries := len(vector)
if limit > 0 && numSeries > limit {
return nil, fmt.Errorf("exceeded limit of %d with %d series", limit, numSeries)
}
rule.SetHealth(HealthGood)
rule.SetLastError(err)
return vector, nil
}
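// A minimal usage sketch of Eval (illustrative only; the rule name, series and
// stub query below are assumptions, not part of this file). A real caller
// would pass an EngineQueryFunc backed by a PromQL engine and storage.
func exampleRecordingRuleEval() (promql.Vector, error) {
	expr, err := parser.ParseExpr(`sum(http_requests_total)`)
	if err != nil {
		return nil, err
	}
	rule := NewRecordingRule("job:http_requests:sum", expr, labels.FromStrings("team", "infra"))
	// Stub QueryFunc returning one sample for the rule expression.
	stub := func(_ context.Context, _ string, ts time.Time) (promql.Vector, error) {
		return promql.Vector{{Metric: labels.FromStrings("job", "app"), F: 42, T: ts.UnixMilli()}}, nil
	}
	// The resulting sample carries __name__="job:http_requests:sum" plus team="infra".
	return rule.Eval(context.Background(), 0, time.Now(), stub, nil, 0)
}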
func (rule *RecordingRule) String() string {
r := rulefmt.Rule{
Record: rule.name,
Expr: rule.vector.String(),
Labels: rule.labels.Map(),
}
byt, err := yaml.Marshal(r)
if err != nil {
return fmt.Sprintf("error marshaling recording rule: %q", err.Error())
}
return string(byt)
}
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) {
rule.evaluationDuration.Store(dur)
}
// SetLastError sets the current error seen by the recording rule.
func (rule *RecordingRule) SetLastError(err error) {
rule.lastError.Store(err)
}
// LastError returns the last error seen by the recording rule.
func (rule *RecordingRule) LastError() error {
return rule.lastError.Load()
}
// SetHealth sets the current health of the recording rule.
func (rule *RecordingRule) SetHealth(health RuleHealth) {
rule.health.Store(string(health))
}
// Health returns the current health of the recording rule.
func (rule *RecordingRule) Health() RuleHealth {
return RuleHealth(rule.health.Load())
}
// GetEvaluationDuration returns the duration it took to evaluate the recording rule on its last evaluation.
func (rule *RecordingRule) GetEvaluationDuration() time.Duration {
return rule.evaluationDuration.Load()
}
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) {
rule.evaluationTimestamp.Store(ts)
}
// GetEvaluationTimestamp returns the time the evaluation took place.
func (rule *RecordingRule) GetEvaluationTimestamp() time.Time {
return rule.evaluationTimestamp.Load()
}
func (rule *RecordingRule) SetDependentRules(dependents []Rule) {
rule.dependenciesMutex.Lock()
defer rule.dependenciesMutex.Unlock()
rule.dependentRules = make([]Rule, len(dependents))
copy(rule.dependentRules, dependents)
}
func (rule *RecordingRule) NoDependentRules() bool {
rule.dependenciesMutex.RLock()
defer rule.dependenciesMutex.RUnlock()
if rule.dependentRules == nil {
return false // We don't know if there are dependent rules.
}
return len(rule.dependentRules) == 0
}
func (rule *RecordingRule) DependentRules() []Rule {
rule.dependenciesMutex.RLock()
defer rule.dependenciesMutex.RUnlock()
return rule.dependentRules
}
func (rule *RecordingRule) SetDependencyRules(dependencies []Rule) {
rule.dependenciesMutex.Lock()
defer rule.dependenciesMutex.Unlock()
rule.dependencyRules = make([]Rule, len(dependencies))
copy(rule.dependencyRules, dependencies)
}
func (rule *RecordingRule) NoDependencyRules() bool {
rule.dependenciesMutex.RLock()
defer rule.dependenciesMutex.RUnlock()
if rule.dependencyRules == nil {
return false // We don't know if there are dependency rules.
}
return len(rule.dependencyRules) == 0
}
func (rule *RecordingRule) DependencyRules() []Rule {
rule.dependenciesMutex.RLock()
defer rule.dependenciesMutex.RUnlock()
return rule.dependencyRules
}
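// A small sketch of the tri-state dependency API above (illustrative, not part
// of the original file): a nil slice means "unknown", while an explicitly
// empty slice means "known to have none".
func exampleDependencyTriState() {
	r := NewRecordingRule("r", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels())
	_ = r.NoDependentRules() // false: dependents were never set, so unknown.
	r.SetDependentRules([]Rule{})
	_ = r.NoDependentRules() // true: explicitly set to none.
}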
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/alerting_test.go | rules/alerting_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"errors"
"testing"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
func testEngine(tb testing.TB) *promql.Engine {
tb.Helper()
return promqltest.NewTestEngineWithOpts(tb, promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10000,
Timeout: 100 * time.Second,
NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
EnableAtModifier: true,
EnableNegativeOffset: true,
EnablePerStepStats: true,
})
}
func TestAlertingRuleState(t *testing.T) {
tests := []struct {
name string
active map[uint64]*Alert
want AlertState
}{
{
name: "MaxStateFiring",
active: map[uint64]*Alert{
0: {State: StatePending},
1: {State: StateFiring},
},
want: StateFiring,
},
{
name: "MaxStatePending",
active: map[uint64]*Alert{
0: {State: StateInactive},
1: {State: StatePending},
},
want: StatePending,
},
{
name: "MaxStateInactive",
active: map[uint64]*Alert{
0: {State: StateInactive},
1: {State: StateInactive},
},
want: StateInactive,
},
}
for i, test := range tests {
rule := NewAlertingRule(test.name, nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
rule.active = test.active
// Set evaluation timestamp to simulate that the rule has been evaluated
rule.SetEvaluationTimestamp(time.Now())
got := rule.State()
require.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
}
}
func TestAlertingRuleTemplateWithHistogram(t *testing.T) {
h := histogram.FloatHistogram{
Schema: 0,
Count: 30,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 1, Length: 5},
},
PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 4, Length: 3},
},
NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2},
}
q := func(context.Context, string, time.Time) (promql.Vector, error) {
return []promql.Sample{{H: &h}}, nil
}
expr, err := parser.ParseExpr("foo")
require.NoError(t, err)
rule := NewAlertingRule(
"HistogramAsValue",
expr,
time.Minute,
0,
labels.FromStrings("histogram", "{{ $value }}"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
evalTime := time.Now()
res, err := rule.Eval(context.TODO(), 0, evalTime, q, nil, 0)
require.NoError(t, err)
require.Len(t, res, 2)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
result := promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HistogramAsValue",
"alertstate", "pending",
"histogram", h.String(),
),
T: timestamp.FromTime(evalTime),
F: 1,
}
testutil.RequireEqual(t, result, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
}
func TestAlertingRuleLabelsUpdate(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70 stale
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
time.Minute,
0,
// Basing alerting rule labels off of a value that can change is a very bad idea.
// If an alert is going back and forth between two label values it will never fire.
// Instead, you should write two alerts with constant labels.
labels.FromStrings("severity", "{{ if lt $value 80.0 }}critical{{ else }}warning{{ end }}"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
results := []promql.Vector{
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
},
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"severity", "warning",
),
F: 1,
},
},
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
},
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateLow",
"alertstate", "firing",
"instance", "0",
"job", "app-server",
"severity", "critical",
),
F: 1,
},
},
}
ng := testEngine(t)
baseTime := time.Unix(0, 0)
for i, result := range results {
t.Logf("case %d", i)
evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].T = timestamp.FromTime(evalTime)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
testutil.RequireEqual(t, result, filteredRes)
}
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
require.Empty(t, res)
}
func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
ruleWithoutExternalLabels := NewAlertingRule(
"ExternalLabelDoesNotExist",
expr,
time.Minute,
0,
labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
ruleWithExternalLabels := NewAlertingRule(
"ExternalLabelExists",
expr,
time.Minute,
0,
labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
labels.EmptyLabels(),
labels.FromStrings("foo", "bar", "dings", "bums"),
"",
true, promslog.NewNopLogger(),
)
result := promql.Vector{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "ExternalLabelDoesNotExist",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"templated_label", "There are 0 external Labels, of which foo is .",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "ExternalLabelExists",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"templated_label", "There are 2 external Labels, of which foo is bar.",
),
F: 1,
},
}
ng := testEngine(t)
evalTime := time.Unix(0, 0)
result[0].T = timestamp.FromTime(evalTime)
result[1].T = timestamp.FromTime(evalTime)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := ruleWithoutExternalLabels.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
res, err = ruleWithExternalLabels.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
testutil.RequireEqual(t, result, filteredRes)
}
func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
ruleWithoutExternalURL := NewAlertingRule(
"ExternalURLDoesNotExist",
expr,
time.Minute,
0,
labels.FromStrings("templated_label", "The external URL is {{ $externalURL }}."),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
ruleWithExternalURL := NewAlertingRule(
"ExternalURLExists",
expr,
time.Minute,
0,
labels.FromStrings("templated_label", "The external URL is {{ $externalURL }}."),
labels.EmptyLabels(),
labels.EmptyLabels(),
"http://localhost:1234",
true, promslog.NewNopLogger(),
)
result := promql.Vector{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "ExternalURLDoesNotExist",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"templated_label", "The external URL is .",
),
F: 1,
},
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "ExternalURLExists",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
"templated_label", "The external URL is http://localhost:1234.",
),
F: 1,
},
}
evalTime := time.Unix(0, 0)
result[0].T = timestamp.FromTime(evalTime)
result[1].T = timestamp.FromTime(evalTime)
ng := testEngine(t)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := ruleWithoutExternalURL.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
res, err = ruleWithExternalURL.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
testutil.RequireEqual(t, result, filteredRes)
}
func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
"EmptyLabel",
expr,
time.Minute,
0,
labels.FromStrings("empty_label", ""),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
result := promql.Vector{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "EmptyLabel",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
),
F: 1,
},
}
evalTime := time.Unix(0, 0)
result[0].T = timestamp.FromTime(evalTime)
ng := testEngine(t)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := rule.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
testutil.RequireEqual(t, result, filteredRes)
}
func TestAlertingRuleQueryInTemplate(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 70 85 70 70
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`sum(http_requests) < 100`)
require.NoError(t, err)
ruleWithQueryInTemplate := NewAlertingRule(
"ruleWithQueryInTemplate",
expr,
time.Minute,
0,
labels.FromStrings("label", "value"),
labels.FromStrings("templated_label", `{{- with "sort(sum(http_requests) by (instance))" | query -}}
{{- range $i,$v := . -}}
instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
{{- end -}}
{{- end -}}
`),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
evalTime := time.Unix(0, 0)
ng := testEngine(t)
startQueryCh := make(chan struct{})
getDoneCh := make(chan struct{})
slowQueryFunc := func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) {
if q == "sort(sum(http_requests) by (instance))" {
// This is a minimal reproduction of issue 10703: template expansion that runs a query.
close(startQueryCh)
select {
case <-getDoneCh:
case <-time.After(time.Millisecond * 10):
// Assert that template expansion does not block.
require.Fail(t, "unexpected blocking during template expansion")
}
}
return EngineQueryFunc(ng, storage)(ctx, q, ts)
}
go func() {
<-startQueryCh
_ = ruleWithQueryInTemplate.Health()
_ = ruleWithQueryInTemplate.LastError()
_ = ruleWithQueryInTemplate.GetEvaluationDuration()
_ = ruleWithQueryInTemplate.GetEvaluationTimestamp()
close(getDoneCh)
}()
_, err = ruleWithQueryInTemplate.Eval(
context.TODO(), 0, evalTime, slowQueryFunc, nil, 0,
)
require.NoError(t, err)
}
func BenchmarkAlertingRuleAtomicField(b *testing.B) {
b.ReportAllocs()
rule := NewAlertingRule("bench", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
done := make(chan struct{})
go func() {
for b.Loop() {
rule.GetEvaluationTimestamp()
}
close(done)
}()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
rule.SetEvaluationTimestamp(time.Now())
}
})
<-done
}
func TestAlertingRuleDuplicate(t *testing.T) {
storage := teststorage.New(t)
defer storage.Close()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
ctx := t.Context()
now := time.Now()
expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
rule := NewAlertingRule(
"foo",
expr,
time.Minute,
0,
labels.FromStrings("test", "test"),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
_, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err)
require.ErrorIs(t, err, ErrDuplicateAlertLabelSet)
}
func TestAlertingRuleLimit(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
metric{label="1"} 1
metric{label="2"} 1
`)
t.Cleanup(func() { storage.Close() })
tests := []struct {
limit int
err string
}{
{
limit: 0,
},
{
limit: -1,
},
{
limit: 2,
},
{
limit: 1,
err: "exceeded limit of 1 with 2 alerts",
},
}
expr, _ := parser.ParseExpr(`metric > 0`)
rule := NewAlertingRule(
"foo",
expr,
time.Minute,
0,
labels.FromStrings("test", "test"),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
evalTime := time.Unix(0, 0)
ng := testEngine(t)
for _, test := range tests {
switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, test.limit); {
case err != nil:
require.EqualError(t, err, test.err)
case test.err != "":
t.Errorf("Expected error %s, got none", test.err)
}
}
}
func TestQueryForStateSeries(t *testing.T) {
testError := errors.New("test error")
type testInput struct {
selectMockFunction func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet
expectedSeries storage.Series
expectedError error
}
tests := []testInput{
// Test for empty series.
{
selectMockFunction: func(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
return storage.EmptySeriesSet()
},
expectedSeries: nil,
expectedError: nil,
},
// Test for error series.
{
selectMockFunction: func(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
return storage.ErrSeriesSet(testError)
},
expectedSeries: nil,
expectedError: testError,
},
// Test for mock series.
{
selectMockFunction: func(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
return storage.TestSeriesSet(storage.MockSeries(
[]int64{1, 2, 3},
[]float64{1, 2, 3},
[]string{"__name__", "ALERTS_FOR_STATE", "alertname", "TestRule", "severity", "critical"},
))
},
expectedSeries: storage.MockSeries(
[]int64{1, 2, 3},
[]float64{1, 2, 3},
[]string{"__name__", "ALERTS_FOR_STATE", "alertname", "TestRule", "severity", "critical"},
),
expectedError: nil,
},
}
testFunc := func(tst testInput) {
querier := &storage.MockQuerier{
SelectMockFunction: tst.selectMockFunction,
}
rule := NewAlertingRule(
"TestRule",
nil,
time.Minute,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
sample := rule.forStateSample(nil, time.Time{}, 0)
seriesSet, err := rule.QueryForStateSeries(context.Background(), querier)
var series storage.Series
for seriesSet.Next() {
if seriesSet.At().Labels().Len() == sample.Metric.Len() {
series = seriesSet.At()
break
}
}
require.Equal(t, tst.expectedSeries, series)
require.Equal(t, tst.expectedError, err)
}
for _, tst := range tests {
testFunc(tst)
}
}
// TestSendAlertsDontAffectActiveAlerts tests a fix for https://github.com/prometheus/prometheus/issues/11424.
func TestSendAlertsDontAffectActiveAlerts(t *testing.T) {
rule := NewAlertingRule(
"TestRule",
nil,
time.Minute,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
// Set an active alert.
lbls := labels.FromStrings("a1", "1")
h := lbls.Hash()
al := &Alert{State: StateFiring, Labels: lbls, ActiveAt: time.Now()}
rule.active[h] = al
expr, err := parser.ParseExpr("foo")
require.NoError(t, err)
rule.vector = expr
// The relabel rule reproduced the bug here.
opts := notifier.Options{
QueueCapacity: 1,
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"a1"},
Regex: relabel.MustNewRegexp("(.+)"),
TargetLabel: "a1",
Replacement: "bug",
Action: "replace",
NameValidationScheme: model.UTF8Validation,
},
},
}
nm := notifier.NewManager(&opts, model.UTF8Validation, promslog.NewNopLogger())
f := SendAlerts(nm, "")
notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) {
require.Len(t, alerts, 1)
require.Equal(t, al, alerts[0])
f(ctx, expr, alerts...)
}
rule.sendAlerts(context.Background(), time.Now(), 0, 0, notifyFunc)
nm.Stop()
// The relabel rule changes a1=1 to a1=bug.
// But the labels with the AlertingRule should not be changed.
testutil.RequireEqual(t, labels.FromStrings("a1", "1"), rule.active[h].Labels)
}
func TestKeepFiringFor(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70 10x5
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests > 50`)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateHigh",
expr,
time.Minute,
time.Minute,
labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
results := []promql.Vector{
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateHigh",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
),
F: 1,
},
},
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateHigh",
"alertstate", "firing",
"instance", "0",
"job", "app-server",
),
F: 1,
},
},
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateHigh",
"alertstate", "firing",
"instance", "0",
"job", "app-server",
),
F: 1,
},
},
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateHigh",
"alertstate", "firing",
"instance", "0",
"job", "app-server",
),
F: 1,
},
},
// From now on the alert should keep firing.
{
promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateHigh",
"alertstate", "firing",
"instance", "0",
"job", "app-server",
),
F: 1,
},
},
}
ng := testEngine(t)
baseTime := time.Unix(0, 0)
for i, result := range results {
t.Logf("case %d", i)
evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].T = timestamp.FromTime(evalTime)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
testutil.RequireEqual(t, result, filteredRes)
}
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
require.Empty(t, res)
}
func TestPendingAndKeepFiringFor(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
http_requests{job="app-server", instance="0"} 75 10x10
`)
t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests > 50`)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateHigh",
expr,
time.Minute,
time.Minute,
labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
result := promql.Sample{
Metric: labels.FromStrings(
"__name__", "ALERTS",
"alertname", "HTTPRequestRateHigh",
"alertstate", "pending",
"instance", "0",
"job", "app-server",
),
F: 1,
}
ng := testEngine(t)
baseTime := time.Unix(0, 0)
result.T = timestamp.FromTime(baseTime)
res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
require.Len(t, res, 2)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
testutil.RequireEqual(t, result, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
evalTime := baseTime.Add(time.Minute)
res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
require.Empty(t, res)
}
// TestAlertingEvalWithOrigin checks that the alerting rule details are passed through the context.
func TestAlertingEvalWithOrigin(t *testing.T) {
ctx := context.Background()
now := time.Now()
const (
name = "my-recording-rule"
query = `count(metric{foo="bar"}) > 0`
)
var (
detail RuleDetail
lbs = labels.FromStrings("test", "test")
)
expr, err := parser.ParseExpr(query)
require.NoError(t, err)
rule := NewAlertingRule(
name,
expr,
time.Second,
time.Minute,
lbs,
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, _ string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx)
return nil, nil
}, nil, 0)
require.NoError(t, err)
require.Equal(t, detail, NewRuleDetail(rule))
}
func TestAlertingRule_SetDependentRules(t *testing.T) {
dependentRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
rule := NewAlertingRule(
"test",
&parser.NumberLiteral{Val: 1},
time.Minute,
0,
labels.FromStrings("test", "test"),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
require.False(t, rule.NoDependentRules())
rule.SetDependentRules([]Rule{dependentRule})
require.False(t, rule.NoDependentRules())
require.Equal(t, []Rule{dependentRule}, rule.DependentRules())
rule.SetDependentRules([]Rule{})
require.True(t, rule.NoDependentRules())
require.Empty(t, rule.DependentRules())
}
func TestAlertingRule_SetDependencyRules(t *testing.T) {
dependencyRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
rule := NewAlertingRule(
"test",
&parser.NumberLiteral{Val: 1},
time.Minute,
0,
labels.FromStrings("test", "test"),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
require.False(t, rule.NoDependencyRules())
rule.SetDependencyRules([]Rule{dependencyRule})
require.False(t, rule.NoDependencyRules())
require.Equal(t, []Rule{dependencyRule}, rule.DependencyRules())
rule.SetDependencyRules([]Rule{})
require.True(t, rule.NoDependencyRules())
require.Empty(t, rule.DependencyRules())
}
func TestAlertingRule_ActiveAlertsCount(t *testing.T) {
rule := NewAlertingRule(
"TestRule",
nil,
time.Minute,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
require.Equal(t, 0, rule.ActiveAlertsCount())
// Set an active alert.
lbls := labels.FromStrings("a1", "1")
h := lbls.Hash()
al := &Alert{State: StateFiring, Labels: lbls, ActiveAt: time.Now()}
rule.active[h] = al
require.Equal(t, 1, rule.ActiveAlertsCount())
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/origin.go | rules/origin.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"fmt"
"github.com/prometheus/prometheus/model/labels"
)
type ruleOrigin struct{}
// RuleDetail contains information about the rule that is being evaluated.
type RuleDetail struct {
Name string
Query string
Labels labels.Labels
Kind string
// NoDependentRules is set to true if it's guaranteed that in the rule group there's no other rule
// which depends on this one.
NoDependentRules bool
// NoDependencyRules is set to true if it's guaranteed that this rule doesn't depend on any other
// rule within the rule group.
NoDependencyRules bool
}
const (
KindAlerting = "alerting"
KindRecording = "recording"
)
// NewRuleDetail creates a RuleDetail from a given Rule.
func NewRuleDetail(r Rule) RuleDetail {
var kind string
switch r.(type) {
case *AlertingRule:
kind = KindAlerting
case *RecordingRule:
kind = KindRecording
default:
panic(fmt.Sprintf(`unknown rule type "%T"`, r))
}
return RuleDetail{
Name: r.Name(),
Query: r.Query().String(),
Labels: r.Labels(),
Kind: kind,
NoDependentRules: r.NoDependentRules(),
NoDependencyRules: r.NoDependencyRules(),
}
}
// NewOriginContext returns a new context with data about the origin attached.
func NewOriginContext(ctx context.Context, rule RuleDetail) context.Context {
return context.WithValue(ctx, ruleOrigin{}, rule)
}
// FromOriginContext returns the RuleDetail origin data from the context.
func FromOriginContext(ctx context.Context) RuleDetail {
if rule, ok := ctx.Value(ruleOrigin{}).(RuleDetail); ok {
return rule
}
return RuleDetail{}
}
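// A minimal usage sketch (illustrative): attach rule details to a context
// before evaluation and read them back further down the query path.
func exampleOriginContext(ctx context.Context, r Rule) string {
	ctx = NewOriginContext(ctx, NewRuleDetail(r))
	// Somewhere deeper in the call chain:
	detail := FromOriginContext(ctx)
	return detail.Kind // KindAlerting or KindRecording.
}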
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/recording_test.go | rules/recording_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
var (
ruleEvaluationTime = time.Unix(0, 0).UTC()
exprWithMetricName, _ = parser.ParseExpr(`sort(metric)`)
exprWithoutMetricName, _ = parser.ParseExpr(`sort(metric + metric)`)
)
var ruleEvalTestScenarios = []struct {
name string
ruleLabels labels.Labels
expr parser.Expr
expected promql.Vector
}{
{
name: "no labels in recording rule, metric name in query result",
ruleLabels: labels.EmptyLabels(),
expr: exprWithMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3"),
F: 1,
T: timestamp.FromTime(ruleEvaluationTime),
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4"),
F: 10,
T: timestamp.FromTime(ruleEvaluationTime),
},
},
},
{
name: "only new labels in recording rule, metric name in query result",
ruleLabels: labels.FromStrings("extra_from_rule", "foo"),
expr: exprWithMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3", "extra_from_rule", "foo"),
F: 1,
T: timestamp.FromTime(ruleEvaluationTime),
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4", "extra_from_rule", "foo"),
F: 10,
T: timestamp.FromTime(ruleEvaluationTime),
},
},
},
{
name: "some replacement labels in recording rule, metric name in query result",
ruleLabels: labels.FromStrings("label_a", "from_rule"),
expr: exprWithMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "from_rule", "label_b", "3"),
F: 1,
T: timestamp.FromTime(ruleEvaluationTime),
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "from_rule", "label_b", "4"),
F: 10,
T: timestamp.FromTime(ruleEvaluationTime),
},
},
},
{
name: "no labels in recording rule, no metric name in query result",
ruleLabels: labels.EmptyLabels(),
expr: exprWithoutMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3"),
F: 2,
T: timestamp.FromTime(ruleEvaluationTime),
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4"),
F: 20,
T: timestamp.FromTime(ruleEvaluationTime),
},
},
},
}
func setUpRuleEvalTest(t require.TestingT) *teststorage.TestStorage {
return promqltest.LoadedStorage(t, `
load 1m
metric{label_a="1",label_b="3"} 1
metric{label_a="2",label_b="4"} 10
`)
}
func TestRuleEval(t *testing.T) {
storage := setUpRuleEvalTest(t)
t.Cleanup(func() { storage.Close() })
ng := testEngine(t)
for _, scenario := range ruleEvalTestScenarios {
t.Run(scenario.name, func(t *testing.T) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err)
testutil.RequireEqual(t, scenario.expected, result)
})
}
}
func BenchmarkRuleEval(b *testing.B) {
storage := setUpRuleEvalTest(b)
b.Cleanup(func() { storage.Close() })
ng := testEngine(b)
for _, scenario := range ruleEvalTestScenarios {
b.Run(scenario.name, func(b *testing.B) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
b.ResetTimer()
for b.Loop() {
_, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(b, err)
}
})
}
}
// TestRuleEvalDuplicate tests for duplicate labels in recorded metrics, see #5529.
func TestRuleEvalDuplicate(t *testing.T) {
storage := teststorage.New(t)
defer storage.Close()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
ctx := t.Context()
now := time.Now()
expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test"))
_, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err)
require.ErrorIs(t, err, ErrDuplicateRecordingLabelSet)
}
func TestRecordingRuleLimit(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
metric{label="1"} 1
metric{label="2"} 1
`)
t.Cleanup(func() { storage.Close() })
tests := []struct {
limit int
err string
}{
{
limit: 0,
},
{
limit: -1,
},
{
limit: 2,
},
{
limit: 1,
err: "exceeded limit of 1 with 2 series",
},
}
expr, _ := parser.ParseExpr(`metric > 0`)
rule := NewRecordingRule(
"foo",
expr,
labels.FromStrings("test", "test"),
)
ng := testEngine(t)
evalTime := time.Unix(0, 0)
for _, test := range tests {
switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, test.limit); {
case err != nil:
require.EqualError(t, err, test.err)
case test.err != "":
t.Errorf("Expected error %s, got none", test.err)
}
}
}
// TestRecordingEvalWithOrigin checks that the recording rule details are passed through the context.
func TestRecordingEvalWithOrigin(t *testing.T) {
ctx := context.Background()
now := time.Now()
const (
name = "my-recording-rule"
query = `count(metric{foo="bar"})`
)
var (
detail RuleDetail
lbs = labels.FromStrings("foo", "bar")
)
expr, err := parser.ParseExpr(query)
require.NoError(t, err)
rule := NewRecordingRule(name, expr, lbs)
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, _ string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx)
return nil, nil
}, nil, 0)
require.NoError(t, err)
require.Equal(t, detail, NewRuleDetail(rule))
}
func TestRecordingRule_SetDependentRules(t *testing.T) {
dependentRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
rule := NewRecordingRule("1", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels())
require.False(t, rule.NoDependentRules())
rule.SetDependentRules([]Rule{dependentRule})
require.False(t, rule.NoDependentRules())
require.Equal(t, []Rule{dependentRule}, rule.DependentRules())
rule.SetDependentRules([]Rule{})
require.True(t, rule.NoDependentRules())
require.Empty(t, rule.DependentRules())
}
func TestRecordingRule_SetDependencyRules(t *testing.T) {
dependencyRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
rule := NewRecordingRule("1", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels())
require.False(t, rule.NoDependencyRules())
rule.SetDependencyRules([]Rule{dependencyRule})
require.False(t, rule.NoDependencyRules())
require.Equal(t, []Rule{dependencyRule}, rule.DependencyRules())
rule.SetDependencyRules([]Rule{})
require.True(t, rule.NoDependencyRules())
require.Empty(t, rule.DependencyRules())
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/alerting.go | rules/alerting.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"errors"
"fmt"
"log/slog"
"net/url"
"strings"
"sync"
"time"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
)
const (
// alertMetricName is the metric name for synthetic alert timeseries.
alertMetricName = "ALERTS"
// alertForStateMetricName is the metric name for the 'for' state of an alert.
alertForStateMetricName = "ALERTS_FOR_STATE"
// alertStateLabel is the label name indicating the state of an alert.
alertStateLabel = "alertstate"
)
// ErrDuplicateAlertLabelSet is returned when an alerting rule evaluation produces
// metrics with identical labelsets after applying alert labels.
var ErrDuplicateAlertLabelSet = errors.New("vector contains metrics with the same labelset after applying alert labels")
// AlertState denotes the state of an active alert.
type AlertState int
const (
// StateUnknown is the state of an alert that has not yet been evaluated.
StateUnknown AlertState = iota
// StateInactive is the state of an alert that is neither firing nor pending.
StateInactive
// StatePending is the state of an alert that has been active for less than
// the configured threshold duration.
StatePending
// StateFiring is the state of an alert that has been active for longer than
// the configured threshold duration.
StateFiring
)
func (s AlertState) String() string {
switch s {
case StateUnknown:
return "unknown"
case StateInactive:
return "inactive"
case StatePending:
return "pending"
case StateFiring:
return "firing"
}
panic(fmt.Errorf("unknown alert state: %d", s))
}
// Alert is the user-level representation of a single instance of an alerting rule.
type Alert struct {
State AlertState
Labels labels.Labels
Annotations labels.Labels
// The value at the last evaluation of the alerting expression.
Value float64
// The interval during which the condition of this alert held true.
// ResolvedAt will be 0 to indicate a still active alert.
ActiveAt time.Time
FiredAt time.Time
ResolvedAt time.Time
LastSentAt time.Time
ValidUntil time.Time
KeepFiringSince time.Time
}
func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
if a.State == StatePending {
return false
}
// If an alert has been resolved since the last send, resend it.
if a.ResolvedAt.After(a.LastSentAt) {
return true
}
return a.LastSentAt.Add(resendDelay).Before(ts)
}
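// A worked sketch of the resend decision above (illustrative timestamps):
//
//	a := &Alert{State: StateFiring, LastSentAt: t0}
//	a.needsSending(t0.Add(30*time.Second), time.Minute) // false: resendDelay not yet elapsed
//	a.needsSending(t0.Add(2*time.Minute), time.Minute)  // true: resendDelay elapsed
//	a.ResolvedAt = t0.Add(3 * time.Minute)
//	a.needsSending(t0.Add(3*time.Minute), time.Minute)  // true: resolved since last send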
// An AlertingRule generates alerts from its vector expression.
type AlertingRule struct {
// The name of the alert.
name string
// The vector expression from which to generate alerts.
vector parser.Expr
// The duration for which a labelset needs to persist in the expression
// output vector before an alert transitions from Pending to Firing state.
holdDuration time.Duration
// The amount of time that the alert should remain firing after the
// resolution.
keepFiringFor time.Duration
// Extra labels to attach to the resulting alert sample vectors.
labels labels.Labels
// Non-identifying key/value pairs.
annotations labels.Labels
// External labels from the global config.
externalLabels map[string]string
// The external URL from the --web.external-url flag.
externalURL string
// true if old state has been restored. We start persisting samples for ALERTS_FOR_STATE
// only after the restoration.
restored *atomic.Bool
// Duration taken to evaluate the rule on its last evaluation.
evaluationDuration *atomic.Duration
// Timestamp of last evaluation of rule.
evaluationTimestamp *atomic.Time
// The health of the alerting rule.
health *atomic.String
// The last error seen by the alerting rule.
lastError *atomic.Error
// activeMtx protects the `active` map.
activeMtx sync.Mutex
// A map of alerts which are currently active (Pending or Firing), keyed by
// the fingerprint of the labelset they correspond to.
active map[uint64]*Alert
logger *slog.Logger
dependenciesMutex sync.RWMutex
dependentRules []Rule
dependencyRules []Rule
}
// NewAlertingRule constructs a new AlertingRule.
func NewAlertingRule(
name string, vec parser.Expr, hold, keepFiringFor time.Duration,
labels, annotations, externalLabels labels.Labels, externalURL string,
restored bool, logger *slog.Logger,
) *AlertingRule {
el := externalLabels.Map()
return &AlertingRule{
name: name,
vector: vec,
holdDuration: hold,
keepFiringFor: keepFiringFor,
labels: labels,
annotations: annotations,
externalLabels: el,
externalURL: externalURL,
active: map[uint64]*Alert{},
logger: logger,
restored: atomic.NewBool(restored),
health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
}
}
// Name returns the name of the alerting rule.
func (r *AlertingRule) Name() string {
return r.name
}
// SetLastError sets the current error seen by the alerting rule.
func (r *AlertingRule) SetLastError(err error) {
r.lastError.Store(err)
}
// LastError returns the last error seen by the alerting rule.
func (r *AlertingRule) LastError() error {
return r.lastError.Load()
}
// SetHealth sets the current health of the alerting rule.
func (r *AlertingRule) SetHealth(health RuleHealth) {
r.health.Store(string(health))
}
// Health returns the current health of the alerting rule.
func (r *AlertingRule) Health() RuleHealth {
return RuleHealth(r.health.Load())
}
// Query returns the query expression of the alerting rule.
func (r *AlertingRule) Query() parser.Expr {
return r.vector
}
// HoldDuration returns the hold duration of the alerting rule.
func (r *AlertingRule) HoldDuration() time.Duration {
return r.holdDuration
}
// KeepFiringFor returns the duration an alerting rule should keep firing for
// after resolution.
func (r *AlertingRule) KeepFiringFor() time.Duration {
return r.keepFiringFor
}
// Labels returns the labels of the alerting rule.
func (r *AlertingRule) Labels() labels.Labels {
return r.labels
}
// Annotations returns the annotations of the alerting rule.
func (r *AlertingRule) Annotations() labels.Labels {
return r.annotations
}
func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
lb := labels.NewBuilder(r.labels)
alert.Labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
})
lb.Set(labels.MetricName, alertMetricName)
lb.Set(labels.AlertName, r.name)
lb.Set(alertStateLabel, alert.State.String())
s := promql.Sample{
Metric: lb.Labels(),
T: timestamp.FromTime(ts),
F: 1,
}
return s
}
// forStateSample returns a promql.Sample with the rule labels, `ALERTS_FOR_STATE` as the metric name and the rule name as the `alertname` label.
// Optionally, if an alert is provided, its labels are copied into the sample labels.
func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
lb := labels.NewBuilder(r.labels)
if alert != nil {
alert.Labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
})
}
lb.Set(labels.MetricName, alertForStateMetricName)
lb.Set(labels.AlertName, r.name)
s := promql.Sample{
Metric: lb.Labels(),
T: timestamp.FromTime(ts),
F: v,
}
return s
}
// QueryForStateSeries returns the series for ALERTS_FOR_STATE of the alert rule.
func (r *AlertingRule) QueryForStateSeries(ctx context.Context, q storage.Querier) (storage.SeriesSet, error) {
// We use a sample to ease the building of matchers.
// Don't provide an alert as we want matchers that match all series for the alert rule.
smpl := r.forStateSample(nil, time.Now(), 0)
var matchers []*labels.Matcher
smpl.Metric.Range(func(l labels.Label) {
mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
if err != nil {
panic(err)
}
matchers = append(matchers, mt)
})
sset := q.Select(ctx, false, nil, matchers...)
return sset, sset.Err()
}
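// A minimal sketch of consuming QueryForStateSeries during state restoration
// (illustrative; the caller and querier are assumptions, not from this file).
func exampleRestoreForState(ctx context.Context, r *AlertingRule, q storage.Querier) error {
	sset, err := r.QueryForStateSeries(ctx, q)
	if err != nil {
		return err
	}
	for sset.Next() {
		_ = sset.At() // One ALERTS_FOR_STATE series per previously active alert.
	}
	return sset.Err()
}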
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
r.evaluationDuration.Store(dur)
}
// GetEvaluationDuration returns the duration it took to evaluate the alerting rule on its last evaluation.
func (r *AlertingRule) GetEvaluationDuration() time.Duration {
return r.evaluationDuration.Load()
}
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
r.evaluationTimestamp.Store(ts)
}
// GetEvaluationTimestamp returns the time the evaluation took place.
func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
return r.evaluationTimestamp.Load()
}
// SetRestored updates the restoration state of the alerting rule.
func (r *AlertingRule) SetRestored(restored bool) {
r.restored.Store(restored)
}
// Restored returns the restoration state of the alerting rule.
func (r *AlertingRule) Restored() bool {
return r.restored.Load()
}
func (r *AlertingRule) SetDependentRules(dependents []Rule) {
r.dependenciesMutex.Lock()
defer r.dependenciesMutex.Unlock()
r.dependentRules = make([]Rule, len(dependents))
copy(r.dependentRules, dependents)
}
func (r *AlertingRule) NoDependentRules() bool {
r.dependenciesMutex.RLock()
defer r.dependenciesMutex.RUnlock()
if r.dependentRules == nil {
return false // We don't know if there are dependent rules.
}
return len(r.dependentRules) == 0
}
func (r *AlertingRule) DependentRules() []Rule {
r.dependenciesMutex.RLock()
defer r.dependenciesMutex.RUnlock()
return r.dependentRules
}
func (r *AlertingRule) SetDependencyRules(dependencies []Rule) {
r.dependenciesMutex.Lock()
defer r.dependenciesMutex.Unlock()
r.dependencyRules = make([]Rule, len(dependencies))
copy(r.dependencyRules, dependencies)
}
func (r *AlertingRule) NoDependencyRules() bool {
r.dependenciesMutex.RLock()
defer r.dependenciesMutex.RUnlock()
if r.dependencyRules == nil {
return false // We don't know if there are dependency rules.
}
return len(r.dependencyRules) == 0
}
func (r *AlertingRule) DependencyRules() []Rule {
r.dependenciesMutex.RLock()
defer r.dependenciesMutex.RUnlock()
return r.dependencyRules
}
// resolvedRetention is the duration for which a resolved alert instance
// is kept in the rule's in-memory state and consequently repeatedly sent to Alertmanager.
const resolvedRetention = 15 * time.Minute
// Eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
ctx = NewOriginContext(ctx, NewRuleDetail(r))
res, err := query(ctx, r.vector.String(), ts.Add(-queryOffset))
if err != nil {
return nil, err
}
// Create pending alerts for any new vector elements in the alert expression
// or update the expression value for existing elements.
resultFPs := map[uint64]struct{}{}
lb := labels.NewBuilder(labels.EmptyLabels())
sb := labels.NewScratchBuilder(0)
var vec promql.Vector
alerts := make(map[uint64]*Alert, len(res))
for _, smpl := range res {
// Provide the alert information to the template.
l := smpl.Metric.Map()
tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl)
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := []string{
"{{$labels := .Labels}}",
"{{$externalLabels := .ExternalLabels}}",
"{{$externalURL := .ExternalURL}}",
"{{$value := .Value}}",
}
expand := func(text string) string {
tmpl := template.NewTemplateExpander(
ctx,
strings.Join(append(defs, text), ""),
"__alert_"+r.Name(),
tmplData,
model.Time(timestamp.FromTime(ts)),
template.QueryFunc(query),
externalURL,
nil,
)
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData)
}
return result
}
lb.Reset(smpl.Metric)
lb.Del(labels.MetricName)
r.labels.Range(func(l labels.Label) {
lb.Set(l.Name, expand(l.Value))
})
lb.Set(labels.AlertName, r.Name())
sb.Reset()
r.annotations.Range(func(a labels.Label) {
sb.Add(a.Name, expand(a.Value))
})
annotations := sb.Labels()
lbs := lb.Labels()
h := lbs.Hash()
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
return nil, ErrDuplicateAlertLabelSet
}
alerts[h] = &Alert{
Labels: lbs,
Annotations: annotations,
ActiveAt: ts,
State: StatePending,
Value: smpl.F,
}
}
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
for h, a := range alerts {
// Check whether we already have alerting state for the identifying label set.
// Update the last value and annotations if so, create a new alert entry otherwise.
if alert, ok := r.active[h]; ok && alert.State != StateInactive {
alert.Value = a.Value
alert.Annotations = a.Annotations
continue
}
r.active[h] = a
}
var numActivePending int
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.active {
if _, ok := resultFPs[fp]; !ok {
// There are no firing alerts for this fingerprint. The alert is no
// longer firing.
// Use the keepFiringFor value to determine whether the alert should
// keep firing.
var keepFiring bool
if a.State == StateFiring && r.keepFiringFor > 0 {
if a.KeepFiringSince.IsZero() {
a.KeepFiringSince = ts
}
if ts.Sub(a.KeepFiringSince) < r.keepFiringFor {
keepFiring = true
}
}
// If the alert is resolved (was firing but is now inactive) keep it for
// at least the retention period. This is important for a number of reasons:
//
// 1. It allows for Prometheus to be more resilient to network issues that
// would otherwise prevent a resolved alert from being reported as resolved
// to Alertmanager.
//
// 2. It helps reduce the chance of resolved notifications being lost if
// Alertmanager crashes or restarts between receiving the resolved alert
// from Prometheus and sending the resolved notification. This tends to
// occur for routes with large Group intervals.
if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
delete(r.active, fp)
}
if a.State != StateInactive && !keepFiring {
a.State = StateInactive
a.ResolvedAt = ts
}
if !keepFiring {
continue
}
} else {
// The alert is firing, reset keepFiringSince.
a.KeepFiringSince = time.Time{}
}
numActivePending++
if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
a.State = StateFiring
a.FiredAt = ts
}
if r.restored.Load() {
vec = append(vec, r.sample(a, ts.Add(-queryOffset)))
vec = append(vec, r.forStateSample(a, ts.Add(-queryOffset), float64(a.ActiveAt.Unix())))
}
}
if limit > 0 && numActivePending > limit {
r.active = map[uint64]*Alert{}
return nil, fmt.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
}
return vec, nil
}
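// A small sketch of how the convenience variables injected above surface in a
// rule file (illustrative config, not part of this source):
//
//	alert: HighErrorRate
//	expr: job:errors:rate5m > 0.05
//	labels:
//	  severity: '{{ if gt $value 0.5 }}critical{{ else }}warning{{ end }}'
//	annotations:
//	  summary: '{{ $labels.job }} error rate is {{ printf "%.2f" $value }}'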
// State returns the maximum state of alert instances for this rule.
// StateFiring > StatePending > StateInactive > StateUnknown.
func (r *AlertingRule) State() AlertState {
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
// Check if the rule has been evaluated
if r.evaluationTimestamp.Load().IsZero() {
return StateUnknown
}
maxState := StateInactive
for _, a := range r.active {
if a.State > maxState {
maxState = a.State
}
}
return maxState
}
// ActiveAlerts returns a slice of active alerts.
func (r *AlertingRule) ActiveAlerts() []*Alert {
var res []*Alert
for _, a := range r.currentAlerts() {
if a.ResolvedAt.IsZero() {
res = append(res, a)
}
}
return res
}
// currentAlerts returns all instances of alerts for this rule. This may include
// inactive alerts that were previously firing.
func (r *AlertingRule) currentAlerts() []*Alert {
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
alerts := make([]*Alert, 0, len(r.active))
for _, a := range r.active {
anew := *a
alerts = append(alerts, &anew)
}
return alerts
}
// ForEachActiveAlert runs the given function on each alert.
// Use this when you want to operate on the actual alerts held by the
// AlertingRule rather than on copies.
// If you want copies instead, get the alerts from ActiveAlerts().
func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
for _, a := range r.active {
f(a)
}
}
func (r *AlertingRule) ActiveAlertsCount() int {
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
return len(r.active)
}
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) {
alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) {
if alert.needsSending(ts, resendDelay) {
alert.LastSentAt = ts
// Allow for two Eval or Alertmanager send failures.
delta := max(interval, resendDelay)
alert.ValidUntil = ts.Add(4 * delta)
anew := *alert
// The notifier re-uses the labels slice, hence make a copy.
anew.Labels = alert.Labels.Copy()
alerts = append(alerts, &anew)
}
})
notifyFunc(ctx, r.vector.String(), alerts...)
}
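// A worked example of the ValidUntil calculation above (illustrative values):
// with interval=1m and resendDelay=4m, delta=max(1m, 4m)=4m, so an alert sent
// at 12:00 remains valid until 12:16, tolerating a few missed resends before
// receivers may consider it stale.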
func (r *AlertingRule) String() string {
ar := rulefmt.Rule{
Alert: r.name,
Expr: r.vector.String(),
For: model.Duration(r.holdDuration),
KeepFiringFor: model.Duration(r.keepFiringFor),
Labels: r.labels.Map(),
Annotations: r.annotations.Map(),
}
byt, err := yaml.Marshal(ar)
if err != nil {
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
}
return string(byt)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/rule.go | rules/rule.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"net/url"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)
// RuleHealth describes the health state of a rule.
type RuleHealth string
// The possible health states of a rule based on the last execution.
const (
HealthUnknown RuleHealth = "unknown"
HealthGood RuleHealth = "ok"
HealthBad RuleHealth = "err"
)
// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
Name() string
// Labels of the rule.
Labels() labels.Labels
// Eval evaluates the rule, including any associated recording or alerting actions.
Eval(ctx context.Context, queryOffset time.Duration, evaluationTime time.Time, queryFunc QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error)
// String returns a human-readable string representation of the rule.
String() string
// Query returns the rule query expression.
Query() parser.Expr
// SetLastError sets the current error experienced by the rule.
SetLastError(error)
// LastError returns the last error experienced by the rule.
LastError() error
// SetHealth sets the current health of the rule.
SetHealth(RuleHealth)
// Health returns the current health of the rule.
Health() RuleHealth
SetEvaluationDuration(time.Duration)
// GetEvaluationDuration returns last evaluation duration.
// NOTE: Used dynamically by rules.html template.
GetEvaluationDuration() time.Duration
SetEvaluationTimestamp(time.Time)
// GetEvaluationTimestamp returns last evaluation timestamp.
// NOTE: Used dynamically by rules.html template.
GetEvaluationTimestamp() time.Time
// SetDependentRules sets rules which depend on the output of this rule.
SetDependentRules(rules []Rule)
// NoDependentRules returns true if it's guaranteed that no other rule in the rule group
// depends on this one. If this function returns false, there is no such guarantee, which
// means there may or may not be other rules depending on this one.
NoDependentRules() bool
// DependentRules returns the rules which depend on the output of this rule.
DependentRules() []Rule
// SetDependencyRules sets rules on which this rule depends.
SetDependencyRules(rules []Rule)
// NoDependencyRules returns true if it's guaranteed that this rule doesn't depend on the output of
// any other rule in the group. If this function returns false, there is no such guarantee, which
// means the rule may or may not depend on other rules.
NoDependencyRules() bool
// DependencyRules returns the rules on which this rule depends.
DependencyRules() []Rule
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/origin_test.go | rules/origin_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"net/url"
"testing"
"time"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)
type unknownRule struct{}
func (unknownRule) Name() string { return "" }
func (unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
func (unknownRule) Eval(context.Context, time.Duration, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
return nil, nil
}
func (unknownRule) String() string { return "" }
func (unknownRule) Query() parser.Expr { return nil }
func (unknownRule) SetLastError(error) {}
func (unknownRule) LastError() error { return nil }
func (unknownRule) SetHealth(RuleHealth) {}
func (unknownRule) Health() RuleHealth { return "" }
func (unknownRule) SetEvaluationDuration(time.Duration) {}
func (unknownRule) GetEvaluationDuration() time.Duration { return 0 }
func (unknownRule) SetEvaluationTimestamp(time.Time) {}
func (unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }
func (unknownRule) SetDependentRules([]Rule) {}
func (unknownRule) NoDependentRules() bool { return false }
func (unknownRule) DependentRules() []Rule { return nil }
func (unknownRule) SetDependencyRules([]Rule) {}
func (unknownRule) NoDependencyRules() bool { return false }
func (unknownRule) DependencyRules() []Rule { return nil }
func TestNewRuleDetailPanics(t *testing.T) {
require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() {
NewRuleDetail(unknownRule{})
})
}
func TestFromOriginContext(t *testing.T) {
t.Run("should return zero value if RuleDetail is missing in the context", func(t *testing.T) {
detail := FromOriginContext(context.Background())
require.Zero(t, detail)
// The zero value for NoDependentRules must be the most conservative option.
require.False(t, detail.NoDependentRules)
// The zero value for NoDependencyRules must be the most conservative option.
require.False(t, detail.NoDependencyRules)
})
}
func TestNewRuleDetail(t *testing.T) {
t.Run("should populate NoDependentRules and NoDependencyRules for a RecordingRule", func(t *testing.T) {
rule := NewRecordingRule("test", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels())
detail := NewRuleDetail(rule)
require.False(t, detail.NoDependentRules)
require.False(t, detail.NoDependencyRules)
rule.SetDependentRules([]Rule{})
detail = NewRuleDetail(rule)
require.True(t, detail.NoDependentRules)
require.False(t, detail.NoDependencyRules)
rule.SetDependencyRules([]Rule{})
detail = NewRuleDetail(rule)
require.True(t, detail.NoDependentRules)
require.True(t, detail.NoDependencyRules)
})
t.Run("should populate NoDependentRules and NoDependencyRules for a AlertingRule", func(t *testing.T) {
rule := NewAlertingRule(
"test",
&parser.NumberLiteral{Val: 1},
time.Minute,
0,
labels.FromStrings("test", "test"),
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, promslog.NewNopLogger(),
)
detail := NewRuleDetail(rule)
require.False(t, detail.NoDependentRules)
require.False(t, detail.NoDependencyRules)
rule.SetDependentRules([]Rule{})
detail = NewRuleDetail(rule)
require.True(t, detail.NoDependentRules)
require.False(t, detail.NoDependencyRules)
rule.SetDependencyRules([]Rule{})
detail = NewRuleDetail(rule)
require.True(t, detail.NoDependentRules)
require.True(t, detail.NoDependencyRules)
})
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/rules/manager.go | rules/manager.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"errors"
"fmt"
"log/slog"
maps0 "maps"
"net/url"
"path/filepath"
"slices"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"golang.org/x/sync/semaphore"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/strutil"
)
// QueryFunc processes PromQL queries.
type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)
// EngineQueryFunc returns a new query function that executes instant queries against
// the given engine.
// It converts scalar results into single-sample vectors.
func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable) QueryFunc {
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
if err != nil {
return nil, err
}
res := q.Exec(ctx)
if res.Err != nil {
return nil, res.Err
}
switch v := res.Value.(type) {
case promql.Vector:
return v, nil
case promql.Scalar:
return promql.Vector{promql.Sample{
T: v.T,
F: v.V,
Metric: labels.Labels{},
}}, nil
default:
return nil, errors.New("rule result is not a vector or scalar")
}
}
}
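// An illustrative sketch (not part of this file): because QueryFunc is a plain
// function type, it can also be stubbed out without an engine, e.g. for tests.
// The fixed sample below is hypothetical.
//
//	stub := QueryFunc(func(_ context.Context, _ string, t time.Time) (promql.Vector, error) {
//		return promql.Vector{promql.Sample{T: t.UnixMilli(), F: 42}}, nil
//	})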
// DefaultEvalIterationFunc is the default implementation of
// GroupEvalIterationFunc that is periodically invoked to evaluate the rules
// in a group at a given point in time and updates Group state and metrics
// accordingly. Custom GroupEvalIterationFunc implementations are recommended
// to invoke this function as well, to ensure correct Group state and metrics
// are maintained.
func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) {
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc()
start := time.Now()
g.Eval(ctx, evalTimestamp)
timeSinceStart := time.Since(start)
g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
g.metrics.IterationDurationHistogram.Observe(timeSinceStart.Seconds())
g.updateRuleEvaluationTimeSum()
g.setEvaluationTime(timeSinceStart)
g.setLastEvaluation(start)
g.setLastEvalTimestamp(evalTimestamp)
}
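// An illustrative sketch of a custom GroupEvalIterationFunc that wraps the
// default implementation, as recommended above (the pre/post hooks are
// hypothetical placeholders):
//
//	iterFunc := GroupEvalIterationFunc(func(ctx context.Context, g *Group, evalTimestamp time.Time) {
//		// Custom pre-evaluation work would go here.
//		DefaultEvalIterationFunc(ctx, g, evalTimestamp)
//		// Custom post-evaluation work would go here.
//	})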
// The Manager manages recording and alerting rules.
type Manager struct {
opts *ManagerOptions
groups map[string]*Group
mtx sync.RWMutex
block chan struct{}
done chan struct{}
restored bool
restoreNewRuleGroups bool
logger *slog.Logger
}
// NotifyFunc sends notifications about a set of alerts generated by the given expression.
type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
NameValidationScheme model.ValidationScheme
ExternalURL *url.URL
QueryFunc QueryFunc
NotifyFunc NotifyFunc
Context context.Context
Appendable storage.Appendable
Queryable storage.Queryable
Logger *slog.Logger
Registerer prometheus.Registerer
OutageTolerance time.Duration
ForGracePeriod time.Duration
ResendDelay time.Duration
GroupLoader GroupLoader
DefaultRuleQueryOffset func() time.Duration
MaxConcurrentEvals int64
ConcurrentEvalsEnabled bool
RuleConcurrencyController RuleConcurrencyController
RuleDependencyController RuleDependencyController
// At present, the manager only restores `for` state when it is newly created, which happens
// during restarts. This flag provides an option to also restore the `for` state when new rule
// groups are added to an existing manager.
Metrics *Metrics
// FeatureRegistry is used to register rule manager features.
FeatureRegistry features.Collector
}
// NewManager returns an implementation of Manager, ready to be started
// by calling the Run method.
func NewManager(o *ManagerOptions) *Manager {
switch o.NameValidationScheme {
case model.UTF8Validation, model.LegacyValidation:
case model.UnsetValidation:
o.NameValidationScheme = model.UTF8Validation
default:
panic(fmt.Errorf("unrecognized name validation scheme: %s", o.NameValidationScheme))
}
if o.Context == nil {
o.Context = context.Background()
}
if o.Metrics == nil {
o.Metrics = NewGroupMetrics(o.Registerer)
}
if o.GroupLoader == nil {
o.GroupLoader = FileLoader{}
}
if o.RuleConcurrencyController == nil {
if o.ConcurrentEvalsEnabled {
o.RuleConcurrencyController = newRuleConcurrencyController(o.MaxConcurrentEvals)
} else {
o.RuleConcurrencyController = sequentialRuleEvalController{}
}
}
if o.RuleDependencyController == nil {
o.RuleDependencyController = ruleDependencyController{}
}
if o.Logger == nil {
o.Logger = promslog.NewNopLogger()
}
// Register rule manager features if a registry is provided.
if o.FeatureRegistry != nil {
o.FeatureRegistry.Set(features.Rules, "concurrent_rule_eval", o.ConcurrentEvalsEnabled)
o.FeatureRegistry.Enable(features.Rules, "query_offset")
o.FeatureRegistry.Enable(features.Rules, "keep_firing_for")
}
m := &Manager{
groups: map[string]*Group{},
opts: o,
block: make(chan struct{}),
done: make(chan struct{}),
logger: o.Logger,
restoreNewRuleGroups: o.RestoreNewRuleGroups,
}
return m
}
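// A minimal construction sketch (illustrative only; engine and store are
// assumed to exist and to satisfy promql.QueryEngine and the storage
// interfaces):
//
//	opts := &ManagerOptions{
//		QueryFunc:  EngineQueryFunc(engine, store),
//		NotifyFunc: func(_ context.Context, _ string, _ ...*Alert) {},
//		Context:    context.Background(),
//		Appendable: store,
//		Queryable:  store,
//	}
//	mgr := NewManager(opts)
//	go mgr.Run()
//	// ... on shutdown:
//	mgr.Stop()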
// Run starts processing of the rule manager. It is blocking.
func (m *Manager) Run() {
m.logger.Info("Starting rule manager...")
m.start()
<-m.done
}
func (m *Manager) start() {
close(m.block)
}
// Stop the rule manager's rule evaluation cycles.
func (m *Manager) Stop() {
m.mtx.Lock()
defer m.mtx.Unlock()
m.logger.Info("Stopping rule manager...")
// Stop all groups asynchronously, then wait for them to finish.
// This is faster than stopping and waiting for each group in sequence.
for _, eg := range m.groups {
eg.stopAsync()
}
for _, eg := range m.groups {
eg.waitStopped()
}
// Shut down the groups waiting multiple evaluation intervals to write
// staleness markers.
close(m.done)
m.logger.Info("Rule manager stopped")
}
// Update the rule manager's state as the config requires. If
// loading the new rules fails, the old rule set is restored.
// This method is a no-op if the manager is already stopped.
func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error {
m.mtx.Lock()
defer m.mtx.Unlock()
// We cannot update a stopped manager
select {
case <-m.done:
return nil
default:
}
groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, false, files...)
if errs != nil {
for _, e := range errs {
m.logger.Error("loading groups failed", "err", e)
}
return errors.New("error loading rules, previous rule set restored")
}
m.restored = true
var wg sync.WaitGroup
for _, newg := range groups {
// If there is an old group with the same identifier,
// check whether the new group equals the old one; if so, keep the old group.
// Otherwise, stop the old group and wait for it to finish the current iteration,
// then copy its state into the new group.
gn := GroupKey(newg.file, newg.name)
oldg, ok := m.groups[gn]
delete(m.groups, gn)
if ok && oldg.Equals(newg) {
groups[gn] = oldg
continue
}
wg.Add(1)
go func(newg *Group) {
if ok {
oldg.stop()
newg.CopyState(oldg)
}
wg.Done()
// Wait with starting evaluation until the rule manager
// is told to run. This is necessary to avoid running
// queries against a bootstrapping storage.
<-m.block
newg.run(m.opts.Context)
}(newg)
}
// Stop remaining old groups.
wg.Add(len(m.groups))
for n, oldg := range m.groups {
go func(n string, g *Group) {
g.markStale = true
g.stop()
if m := g.metrics; m != nil {
m.IterationsMissed.DeleteLabelValues(n)
m.IterationsScheduled.DeleteLabelValues(n)
m.EvalTotal.DeleteLabelValues(n)
m.EvalFailures.DeleteLabelValues(n)
m.GroupInterval.DeleteLabelValues(n)
m.GroupLastEvalTime.DeleteLabelValues(n)
m.GroupLastDuration.DeleteLabelValues(n)
m.GroupRules.DeleteLabelValues(n)
m.GroupSamples.DeleteLabelValues(n)
}
wg.Done()
}(n, oldg)
}
wg.Wait()
m.groups = groups
return nil
}
// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them.
type GroupLoader interface {
Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error)
Parse(query string) (parser.Expr, error)
}
// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
// and parser.ParseExpr.
type FileLoader struct{}
func (FileLoader) Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error) {
return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme)
}
func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
// LoadGroups reads groups from a list of files.
func (m *Manager) LoadGroups(
interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, ignoreUnknownFields bool, filenames ...string,
) (map[string]*Group, []error) {
groups := make(map[string]*Group)
shouldRestore := !m.restored || m.restoreNewRuleGroups
for _, fn := range filenames {
rgs, errs := m.opts.GroupLoader.Load(fn, ignoreUnknownFields, m.opts.NameValidationScheme)
if errs != nil {
return nil, errs
}
for _, rg := range rgs.Groups {
itv := interval
if rg.Interval != 0 {
itv = time.Duration(rg.Interval)
}
rules := make([]Rule, 0, len(rg.Rules))
for _, r := range rg.Rules {
expr, err := m.opts.GroupLoader.Parse(r.Expr)
if err != nil {
return nil, []error{fmt.Errorf("%s: %w", fn, err)}
}
mLabels := FromMaps(rg.Labels, r.Labels)
if r.Alert != "" {
rules = append(rules, NewAlertingRule(
r.Alert,
expr,
time.Duration(r.For),
time.Duration(r.KeepFiringFor),
mLabels,
labels.FromMap(r.Annotations),
externalLabels,
externalURL,
!shouldRestore,
m.logger.With("alert", r.Alert),
))
continue
}
rules = append(rules, NewRecordingRule(
r.Record,
expr,
mLabels,
))
}
// Check dependencies between rules and record the result on each Rule.
m.opts.RuleDependencyController.AnalyseRules(rules)
groups[GroupKey(fn, rg.Name)] = NewGroup(GroupOptions{
Name: rg.Name,
File: fn,
Interval: itv,
Limit: rg.Limit,
Rules: rules,
ShouldRestore: shouldRestore,
Opts: m.opts,
QueryOffset: (*time.Duration)(rg.QueryOffset),
done: m.done,
EvalIterationFunc: groupEvalIterationFunc,
})
}
}
return groups, nil
}
// RuleGroups returns the list of manager's rule groups.
func (m *Manager) RuleGroups() []*Group {
m.mtx.RLock()
defer m.mtx.RUnlock()
rgs := make([]*Group, 0, len(m.groups))
for _, g := range m.groups {
rgs = append(rgs, g)
}
slices.SortFunc(rgs, func(a, b *Group) int {
fileCompare := strings.Compare(a.file, b.file)
// If it's 0, the file names are the same,
// so compare the group names in that case.
if fileCompare != 0 {
return fileCompare
}
return strings.Compare(a.name, b.name)
})
return rgs
}
// Rules returns the list of the manager's rules.
func (m *Manager) Rules(matcherSets ...[]*labels.Matcher) []Rule {
m.mtx.RLock()
defer m.mtx.RUnlock()
var rules []Rule
for _, g := range m.groups {
rules = append(rules, g.Rules(matcherSets...)...)
}
return rules
}
// AlertingRules returns the list of the manager's alerting rules.
func (m *Manager) AlertingRules() []*AlertingRule {
alerts := []*AlertingRule{}
for _, rule := range m.Rules() {
if alertingRule, ok := rule.(*AlertingRule); ok {
alerts = append(alerts, alertingRule)
}
}
return alerts
}
type Sender interface {
Send(alerts ...*notifier.Alert)
}
// SendAlerts implements the rules.NotifyFunc for a Notifier.
func SendAlerts(s Sender, externalURL string) NotifyFunc {
return func(_ context.Context, expr string, alerts ...*Alert) {
var res []*notifier.Alert
for _, alert := range alerts {
a := &notifier.Alert{
StartsAt: alert.FiredAt,
Labels: alert.Labels,
Annotations: alert.Annotations,
GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
}
if !alert.ResolvedAt.IsZero() {
a.EndsAt = alert.ResolvedAt
} else {
a.EndsAt = alert.ValidUntil
}
res = append(res, a)
}
if len(alerts) > 0 {
s.Send(res...)
}
}
}
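// Typical wiring (illustrative sketch; n is assumed to be a value satisfying
// Sender, and externalURL a *url.URL for this server):
//
//	opts.NotifyFunc = SendAlerts(n, externalURL.String())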
// RuleDependencyController determines whether rules within a set depend on each other.
type RuleDependencyController interface {
// AnalyseRules analyses dependencies between the input rules. For each rule that is guaranteed
// not to have any dependents and/or dependencies, this function should call Rule.SetDependentRules(...)
// and/or Rule.SetDependencyRules(...).
AnalyseRules(rules []Rule)
}
type ruleDependencyController struct{}
// AnalyseRules implements RuleDependencyController.
func (ruleDependencyController) AnalyseRules(rules []Rule) {
depMap := buildDependencyMap(rules)
if depMap == nil {
return
}
for _, r := range rules {
r.SetDependentRules(depMap.dependents(r))
r.SetDependencyRules(depMap.dependencies(r))
}
}
// ConcurrentRules represents a slice of indexes of rules that can be evaluated concurrently.
type ConcurrentRules []int
// RuleConcurrencyController controls concurrency for rules that are safe to be evaluated concurrently.
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
type RuleConcurrencyController interface {
// SplitGroupIntoBatches returns an ordered slice of ConcurrentRules, which are batches of rules that can be evaluated concurrently.
// The rules are represented by their index from the input rule group.
SplitGroupIntoBatches(ctx context.Context, group *Group) []ConcurrentRules
// Allow determines if the given rule is allowed to be evaluated concurrently.
// If Allow() returns true, then Done() must be called to release the acquired slot and perform the corresponding cleanup.
// It is important that neither the *Group nor the Rule is retained; they must only be used for the duration of the call.
Allow(ctx context.Context, group *Group, rule Rule) bool
// Done releases a concurrent evaluation slot.
Done(ctx context.Context)
}
// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
type concurrentRuleEvalController struct {
sema *semaphore.Weighted
}
func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
return &concurrentRuleEvalController{
sema: semaphore.NewWeighted(maxConcurrency),
}
}
func (c *concurrentRuleEvalController) Allow(context.Context, *Group, Rule) bool {
return c.sema.TryAcquire(1)
}
func (*concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules {
// Using the rule dependency controller information (rules being identified as having no dependencies or no dependants),
// we can safely run the following concurrent groups:
// 1. Concurrently, all rules that have no dependencies
// 2. Sequentially, all rules that have both dependencies and dependants
// 3. Concurrently, all rules that have no dependants
var noDependencies []int
var dependenciesAndDependants []int
var noDependants []int
for i, r := range g.rules {
switch {
case r.NoDependencyRules():
noDependencies = append(noDependencies, i)
case !r.NoDependentRules() && !r.NoDependencyRules():
dependenciesAndDependants = append(dependenciesAndDependants, i)
case r.NoDependentRules():
noDependants = append(noDependants, i)
}
}
var order []ConcurrentRules
if len(noDependencies) > 0 {
order = append(order, noDependencies)
}
for _, r := range dependenciesAndDependants {
order = append(order, []int{r})
}
if len(noDependants) > 0 {
order = append(order, noDependants)
}
return order
}
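// A worked example (hypothetical group): for rules r0 and r1 with no
// dependencies, r2 with both dependencies and dependents, and r3 with
// dependencies but no dependents, the batches are [[0, 1], [2], [3]]:
// r0 and r1 may be evaluated concurrently, then r2 runs on its own, then r3.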
func (c *concurrentRuleEvalController) Done(context.Context) {
c.sema.Release(1)
}
var _ RuleConcurrencyController = &sequentialRuleEvalController{}
// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}
func (sequentialRuleEvalController) Allow(context.Context, *Group, Rule) bool {
return false
}
func (sequentialRuleEvalController) SplitGroupIntoBatches(context.Context, *Group) []ConcurrentRules {
return nil
}
func (sequentialRuleEvalController) Done(context.Context) {}
// FromMaps returns new sorted Labels from the given maps, with later maps
// overriding earlier ones.
func FromMaps(maps ...map[string]string) labels.Labels {
mLabels := make(map[string]string)
for _, m := range maps {
maps0.Copy(mLabels, m)
}
return labels.FromMap(mLabels)
}
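// For example:
//
//	FromMaps(
//		map[string]string{"a": "1", "b": "2"},
//		map[string]string{"b": "3"},
//	)
//
// yields the sorted label set {a="1", b="3"}, since the later map overrides
// the earlier one.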
// ParseFiles parses the rule files corresponding to glob patterns.
func ParseFiles(patterns []string, nameValidationScheme model.ValidationScheme) error {
files := map[string]string{}
for _, pat := range patterns {
fns, err := filepath.Glob(pat)
if err != nil {
return fmt.Errorf("failed retrieving rule files for %q: %w", pat, err)
}
for _, fn := range fns {
absPath, err := filepath.Abs(fn)
if err != nil {
absPath = fn
}
cleanPath, err := filepath.EvalSymlinks(absPath)
if err != nil {
return fmt.Errorf("failed evaluating rule file path %q (pattern: %q): %w", absPath, pat, err)
}
files[cleanPath] = pat
}
}
for fn, pat := range files {
_, errs := rulefmt.ParseFile(fn, false, nameValidationScheme)
if len(errs) > 0 {
return fmt.Errorf("parse rules from file %q (pattern: %q): %w", fn, pat, errors.Join(errs...))
}
}
return nil
}
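// Usage sketch (the glob pattern below is hypothetical):
//
//	err := ParseFiles([]string{"/etc/prometheus/rules/*.yml"}, model.UTF8Validation)
//
// Every matching file is resolved (following symlinks) before parsing, and the
// first file with parse errors causes a combined error to be returned.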
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/plugins/plugins.go | plugins/plugins.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by "make plugins". DO NOT EDIT.
package plugins
import (
// Register aws plugin.
_ "github.com/prometheus/prometheus/discovery/aws"
// Register azure plugin.
_ "github.com/prometheus/prometheus/discovery/azure"
// Register consul plugin.
_ "github.com/prometheus/prometheus/discovery/consul"
// Register digitalocean plugin.
_ "github.com/prometheus/prometheus/discovery/digitalocean"
// Register dns plugin.
_ "github.com/prometheus/prometheus/discovery/dns"
// Register eureka plugin.
_ "github.com/prometheus/prometheus/discovery/eureka"
// Register gce plugin.
_ "github.com/prometheus/prometheus/discovery/gce"
// Register hetzner plugin.
_ "github.com/prometheus/prometheus/discovery/hetzner"
// Register ionos plugin.
_ "github.com/prometheus/prometheus/discovery/ionos"
// Register kubernetes plugin.
_ "github.com/prometheus/prometheus/discovery/kubernetes"
// Register linode plugin.
_ "github.com/prometheus/prometheus/discovery/linode"
// Register marathon plugin.
_ "github.com/prometheus/prometheus/discovery/marathon"
// Register moby plugin.
_ "github.com/prometheus/prometheus/discovery/moby"
// Register nomad plugin.
_ "github.com/prometheus/prometheus/discovery/nomad"
// Register openstack plugin.
_ "github.com/prometheus/prometheus/discovery/openstack"
// Register ovhcloud plugin.
_ "github.com/prometheus/prometheus/discovery/ovhcloud"
// Register puppetdb plugin.
_ "github.com/prometheus/prometheus/discovery/puppetdb"
// Register scaleway plugin.
_ "github.com/prometheus/prometheus/discovery/scaleway"
// Register stackit plugin.
_ "github.com/prometheus/prometheus/discovery/stackit"
// Register triton plugin.
_ "github.com/prometheus/prometheus/discovery/triton"
// Register uyuni plugin.
_ "github.com/prometheus/prometheus/discovery/uyuni"
// Register vultr plugin.
_ "github.com/prometheus/prometheus/discovery/vultr"
// Register xds plugin.
_ "github.com/prometheus/prometheus/discovery/xds"
// Register zookeeper plugin.
_ "github.com/prometheus/prometheus/discovery/zookeeper"
)
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/plugins/minimum.go | plugins/minimum.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugins
import (
_ "github.com/prometheus/prometheus/discovery/file" // Register file plugin.
_ "github.com/prometheus/prometheus/discovery/http" // Register http plugin.
)
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/plugins/generate.go | plugins/generate.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build plugins
package main
import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"go.yaml.in/yaml/v2"
)
//go:generate go run generate.go
func main() {
data, err := os.ReadFile(filepath.Join("..", "plugins.yml"))
if err != nil {
log.Fatal(err)
}
var plugins []string
err = yaml.Unmarshal(data, &plugins)
if err != nil {
log.Fatal(err)
}
f, err := os.Create("plugins.go")
if err != nil {
log.Fatal(err)
}
defer f.Close()
_, err = f.WriteString(`// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by "make plugins". DO NOT EDIT.
package plugins
`)
if err != nil {
log.Fatal(err)
}
if len(plugins) == 0 {
return
}
_, err = f.WriteString("import (\n")
if err != nil {
log.Fatal(err)
}
for _, plugin := range plugins {
_, err = f.WriteString(fmt.Sprintf("\t// Register %s plugin.\n", path.Base(plugin)))
if err != nil {
log.Fatal(err)
}
_, err = f.WriteString(fmt.Sprintf("\t_ \"%s\"\n", plugin))
if err != nil {
log.Fatal(err)
}
}
_, err = f.WriteString(")\n")
if err != nil {
log.Fatal(err)
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/custom.go | prompb/custom.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prompb
import (
"sync"
)
func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
size := r.Size()
data, ok := p.Get().(*[]byte)
if ok && cap(*data) >= size {
n, err := r.MarshalToSizedBuffer((*data)[:size])
if err != nil {
return nil, err
}
return (*data)[:n], nil
}
return r.Marshal()
}
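// Usage sketch (illustrative; resp is assumed to be a *ChunkedReadResponse):
// the pool must hand out *[]byte values, and the caller may return the buffer
// to the pool once the marshaled bytes are no longer referenced.
//
//	pool := &sync.Pool{New: func() interface{} {
//		b := make([]byte, 0, 1024)
//		return &b
//	}}
//	data, err := resp.PooledMarshal(pool)
//	if err != nil {
//		// handle the error
//	}
//	// ... use data ...
//	pool.Put(&data)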
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/types.pb.go | prompb/types.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: types.proto
package prompb
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type MetricMetadata_MetricType int32
const (
MetricMetadata_UNKNOWN MetricMetadata_MetricType = 0
MetricMetadata_COUNTER MetricMetadata_MetricType = 1
MetricMetadata_GAUGE MetricMetadata_MetricType = 2
MetricMetadata_HISTOGRAM MetricMetadata_MetricType = 3
MetricMetadata_GAUGEHISTOGRAM MetricMetadata_MetricType = 4
MetricMetadata_SUMMARY MetricMetadata_MetricType = 5
MetricMetadata_INFO MetricMetadata_MetricType = 6
MetricMetadata_STATESET MetricMetadata_MetricType = 7
)
var MetricMetadata_MetricType_name = map[int32]string{
0: "UNKNOWN",
1: "COUNTER",
2: "GAUGE",
3: "HISTOGRAM",
4: "GAUGEHISTOGRAM",
5: "SUMMARY",
6: "INFO",
7: "STATESET",
}
var MetricMetadata_MetricType_value = map[string]int32{
"UNKNOWN": 0,
"COUNTER": 1,
"GAUGE": 2,
"HISTOGRAM": 3,
"GAUGEHISTOGRAM": 4,
"SUMMARY": 5,
"INFO": 6,
"STATESET": 7,
}
func (x MetricMetadata_MetricType) String() string {
return proto.EnumName(MetricMetadata_MetricType_name, int32(x))
}
func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{0, 0}
}
type Histogram_ResetHint int32
const (
Histogram_UNKNOWN Histogram_ResetHint = 0
Histogram_YES Histogram_ResetHint = 1
Histogram_NO Histogram_ResetHint = 2
Histogram_GAUGE Histogram_ResetHint = 3
)
var Histogram_ResetHint_name = map[int32]string{
0: "UNKNOWN",
1: "YES",
2: "NO",
3: "GAUGE",
}
var Histogram_ResetHint_value = map[string]int32{
"UNKNOWN": 0,
"YES": 1,
"NO": 2,
"GAUGE": 3,
}
func (x Histogram_ResetHint) String() string {
return proto.EnumName(Histogram_ResetHint_name, int32(x))
}
func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{3, 0}
}
type LabelMatcher_Type int32
const (
LabelMatcher_EQ LabelMatcher_Type = 0
LabelMatcher_NEQ LabelMatcher_Type = 1
LabelMatcher_RE LabelMatcher_Type = 2
LabelMatcher_NRE LabelMatcher_Type = 3
)
var LabelMatcher_Type_name = map[int32]string{
0: "EQ",
1: "NEQ",
2: "RE",
3: "NRE",
}
var LabelMatcher_Type_value = map[string]int32{
"EQ": 0,
"NEQ": 1,
"RE": 2,
"NRE": 3,
}
func (x LabelMatcher_Type) String() string {
return proto.EnumName(LabelMatcher_Type_name, int32(x))
}
func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{8, 0}
}
// We require this to match chunkenc.Encoding.
type Chunk_Encoding int32
const (
Chunk_UNKNOWN Chunk_Encoding = 0
Chunk_XOR Chunk_Encoding = 1
Chunk_HISTOGRAM Chunk_Encoding = 2
Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3
)
var Chunk_Encoding_name = map[int32]string{
0: "UNKNOWN",
1: "XOR",
2: "HISTOGRAM",
3: "FLOAT_HISTOGRAM",
}
var Chunk_Encoding_value = map[string]int32{
"UNKNOWN": 0,
"XOR": 1,
"HISTOGRAM": 2,
"FLOAT_HISTOGRAM": 3,
}
func (x Chunk_Encoding) String() string {
return proto.EnumName(Chunk_Encoding_name, int32(x))
}
func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{10, 0}
}
type MetricMetadata struct {
// Represents the metric type; these match the set from Prometheus.
// Refer to github.com/prometheus/common/model/metadata.go for details.
Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"`
MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MetricMetadata) Reset() { *m = MetricMetadata{} }
func (m *MetricMetadata) String() string { return proto.CompactTextString(m) }
func (*MetricMetadata) ProtoMessage() {}
func (*MetricMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{0}
}
func (m *MetricMetadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *MetricMetadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricMetadata.Merge(m, src)
}
func (m *MetricMetadata) XXX_Size() int {
return m.Size()
}
func (m *MetricMetadata) XXX_DiscardUnknown() {
xxx_messageInfo_MetricMetadata.DiscardUnknown(m)
}
var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo
func (m *MetricMetadata) GetType() MetricMetadata_MetricType {
if m != nil {
return m.Type
}
return MetricMetadata_UNKNOWN
}
func (m *MetricMetadata) GetMetricFamilyName() string {
if m != nil {
return m.MetricFamilyName
}
return ""
}
func (m *MetricMetadata) GetHelp() string {
if m != nil {
return m.Help
}
return ""
}
func (m *MetricMetadata) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{1}
}
func (m *Sample) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Sample) XXX_Merge(src proto.Message) {
xxx_messageInfo_Sample.Merge(m, src)
}
func (m *Sample) XXX_Size() int {
return m.Size()
}
func (m *Sample) XXX_DiscardUnknown() {
xxx_messageInfo_Sample.DiscardUnknown(m)
}
var xxx_messageInfo_Sample proto.InternalMessageInfo
func (m *Sample) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Sample) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
type Exemplar struct {
// Optional, can be empty.
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{2}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return m.Size()
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabels() []Label {
if m != nil {
return m.Labels
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both the usual
// integer histogram and a float histogram.
type Histogram struct {
// Types that are valid to be assigned to Count:
//
// *Histogram_CountInt
// *Histogram_CountFloat
Count isHistogram_Count `protobuf_oneof:"count"`
Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
// The schema defines the bucket schema. Currently, valid numbers
// are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
// is a bucket boundary in each case, and then each power of two is
// divided into 2^n logarithmic buckets. Or in other words, each
// bucket boundary is the previous boundary times 2^(2^-n). In the
// future, more bucket schemas may be added using numbers < -4 or >
// 8. (A worked example of the boundary factor follows this struct definition.)
Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
// Types that are valid to be assigned to ZeroCount:
//
// *Histogram_ZeroCountInt
// *Histogram_ZeroCountFloat
ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
// Negative Buckets.
NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
// Use either "negative_deltas" or "negative_counts", the former for
// regular histograms with integer counts, the latter for float
// histograms.
NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
// Positive Buckets.
PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
// Use either "positive_deltas" or "positive_counts", the former for
// regular histograms with integer counts, the latter for float
// histograms.
PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"`
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// custom_values are not part of the specification, DO NOT use in remote write clients.
// Used only for converting from OpenTelemetry to Prometheus internally.
CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
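// A worked example for the Schema field above: with schema 0, consecutive
// bucket boundaries differ by a factor of 2^(2^-0) = 2, i.e. the boundaries
// are the powers of two themselves (..., 0.5, 1, 2, 4, ...). With schema 3,
// each power of two is divided into 2^3 = 8 buckets, so consecutive
// boundaries differ by a factor of 2^(1/8) ≈ 1.0905. With schema -1,
// consecutive boundaries differ by a factor of 2^(2^1) = 4.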
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{3}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return m.Size()
}
func (m *Histogram) XXX_DiscardUnknown() {
xxx_messageInfo_Histogram.DiscardUnknown(m)
}
var xxx_messageInfo_Histogram proto.InternalMessageInfo
type isHistogram_Count interface {
isHistogram_Count()
MarshalTo([]byte) (int, error)
Size() int
}
type isHistogram_ZeroCount interface {
isHistogram_ZeroCount()
MarshalTo([]byte) (int, error)
Size() int
}
type Histogram_CountInt struct {
CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"`
}
type Histogram_CountFloat struct {
CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"`
}
type Histogram_ZeroCountInt struct {
ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"`
}
type Histogram_ZeroCountFloat struct {
ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"`
}
func (*Histogram_CountInt) isHistogram_Count() {}
func (*Histogram_CountFloat) isHistogram_Count() {}
func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {}
func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
func (m *Histogram) GetCount() isHistogram_Count {
if m != nil {
return m.Count
}
return nil
}
func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
if m != nil {
return m.ZeroCount
}
return nil
}
func (m *Histogram) GetCountInt() uint64 {
if x, ok := m.GetCount().(*Histogram_CountInt); ok {
return x.CountInt
}
return 0
}
func (m *Histogram) GetCountFloat() float64 {
if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
return x.CountFloat
}
return 0
}
func (m *Histogram) GetSum() float64 {
if m != nil {
return m.Sum
}
return 0
}
func (m *Histogram) GetSchema() int32 {
if m != nil {
return m.Schema
}
return 0
}
func (m *Histogram) GetZeroThreshold() float64 {
if m != nil {
return m.ZeroThreshold
}
return 0
}
func (m *Histogram) GetZeroCountInt() uint64 {
if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
return x.ZeroCountInt
}
return 0
}
func (m *Histogram) GetZeroCountFloat() float64 {
if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
return x.ZeroCountFloat
}
return 0
}
func (m *Histogram) GetNegativeSpans() []BucketSpan {
if m != nil {
return m.NegativeSpans
}
return nil
}
func (m *Histogram) GetNegativeDeltas() []int64 {
if m != nil {
return m.NegativeDeltas
}
return nil
}
func (m *Histogram) GetNegativeCounts() []float64 {
if m != nil {
return m.NegativeCounts
}
return nil
}
func (m *Histogram) GetPositiveSpans() []BucketSpan {
if m != nil {
return m.PositiveSpans
}
return nil
}
func (m *Histogram) GetPositiveDeltas() []int64 {
if m != nil {
return m.PositiveDeltas
}
return nil
}
func (m *Histogram) GetPositiveCounts() []float64 {
if m != nil {
return m.PositiveCounts
}
return nil
}
func (m *Histogram) GetResetHint() Histogram_ResetHint {
if m != nil {
return m.ResetHint
}
return Histogram_UNKNOWN
}
func (m *Histogram) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *Histogram) GetCustomValues() []float64 {
if m != nil {
return m.CustomValues
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Histogram) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Histogram_CountInt)(nil),
(*Histogram_CountFloat)(nil),
(*Histogram_ZeroCountInt)(nil),
(*Histogram_ZeroCountFloat)(nil),
}
}
// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
type BucketSpan struct {
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BucketSpan) Reset() { *m = BucketSpan{} }
func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
func (*BucketSpan) ProtoMessage() {}
func (*BucketSpan) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{4}
}
func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BucketSpan) XXX_Merge(src proto.Message) {
xxx_messageInfo_BucketSpan.Merge(m, src)
}
func (m *BucketSpan) XXX_Size() int {
return m.Size()
}
func (m *BucketSpan) XXX_DiscardUnknown() {
xxx_messageInfo_BucketSpan.DiscardUnknown(m)
}
var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
func (m *BucketSpan) GetOffset() int32 {
if m != nil {
return m.Offset
}
return 0
}
func (m *BucketSpan) GetLength() uint32 {
if m != nil {
return m.Length
}
return 0
}
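// A worked example (hypothetical data): the spans {offset: 1, length: 2} and
// {offset: 2, length: 1} describe three populated buckets at indexes 1, 2,
// and 5. The first span starts at bucket index 1 and covers two consecutive
// buckets; the second span then skips two indexes before covering one more
// bucket. The three corresponding counts live in the single flat bucket
// array, as described above.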
// TimeSeries represents samples and labels for a single time series.
type TimeSeries struct {
// For a timeseries to be valid, and for the samples and exemplars
// to be ingested by the remote system properly, the labels field is required.
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{5}
}
func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *TimeSeries) XXX_Merge(src proto.Message) {
xxx_messageInfo_TimeSeries.Merge(m, src)
}
func (m *TimeSeries) XXX_Size() int {
return m.Size()
}
func (m *TimeSeries) XXX_DiscardUnknown() {
xxx_messageInfo_TimeSeries.DiscardUnknown(m)
}
var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
func (m *TimeSeries) GetLabels() []Label {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []Sample {
if m != nil {
return m.Samples
}
return nil
}
func (m *TimeSeries) GetExemplars() []Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
func (m *TimeSeries) GetHistograms() []Histogram {
if m != nil {
return m.Histograms
}
return nil
}
type Label struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Label) Reset() { *m = Label{} }
func (m *Label) String() string { return proto.CompactTextString(m) }
func (*Label) ProtoMessage() {}
func (*Label) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{6}
}
func (m *Label) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Label.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Label) XXX_Merge(src proto.Message) {
xxx_messageInfo_Label.Merge(m, src)
}
func (m *Label) XXX_Size() int {
return m.Size()
}
func (m *Label) XXX_DiscardUnknown() {
xxx_messageInfo_Label.DiscardUnknown(m)
}
var xxx_messageInfo_Label proto.InternalMessageInfo
func (m *Label) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Label) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type Labels struct {
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Labels) Reset() { *m = Labels{} }
func (m *Labels) String() string { return proto.CompactTextString(m) }
func (*Labels) ProtoMessage() {}
func (*Labels) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{7}
}
func (m *Labels) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Labels.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Labels) XXX_Merge(src proto.Message) {
xxx_messageInfo_Labels.Merge(m, src)
}
func (m *Labels) XXX_Size() int {
return m.Size()
}
func (m *Labels) XXX_DiscardUnknown() {
xxx_messageInfo_Labels.DiscardUnknown(m)
}
var xxx_messageInfo_Labels proto.InternalMessageInfo
func (m *Labels) GetLabels() []Label {
if m != nil {
return m.Labels
}
return nil
}
// Matcher specifies a rule, which can match a set of labels or not.
type LabelMatcher struct {
Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{8}
}
func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *LabelMatcher) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelMatcher.Merge(m, src)
}
func (m *LabelMatcher) XXX_Size() int {
return m.Size()
}
func (m *LabelMatcher) XXX_DiscardUnknown() {
xxx_messageInfo_LabelMatcher.DiscardUnknown(m)
}
var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo
func (m *LabelMatcher) GetType() LabelMatcher_Type {
if m != nil {
return m.Type
}
return LabelMatcher_EQ
}
func (m *LabelMatcher) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *LabelMatcher) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type ReadHints struct {
StepMs int64 `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"`
Func string `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"`
StartMs int64 `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"`
EndMs int64 `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"`
Grouping []string `protobuf:"bytes,5,rep,name=grouping,proto3" json:"grouping,omitempty"`
By bool `protobuf:"varint,6,opt,name=by,proto3" json:"by,omitempty"`
RangeMs int64 `protobuf:"varint,7,opt,name=range_ms,json=rangeMs,proto3" json:"range_ms,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReadHints) Reset() { *m = ReadHints{} }
func (m *ReadHints) String() string { return proto.CompactTextString(m) }
func (*ReadHints) ProtoMessage() {}
func (*ReadHints) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{9}
}
func (m *ReadHints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReadHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReadHints.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ReadHints) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReadHints.Merge(m, src)
}
func (m *ReadHints) XXX_Size() int {
return m.Size()
}
func (m *ReadHints) XXX_DiscardUnknown() {
xxx_messageInfo_ReadHints.DiscardUnknown(m)
}
var xxx_messageInfo_ReadHints proto.InternalMessageInfo
func (m *ReadHints) GetStepMs() int64 {
if m != nil {
return m.StepMs
}
return 0
}
func (m *ReadHints) GetFunc() string {
if m != nil {
return m.Func
}
return ""
}
func (m *ReadHints) GetStartMs() int64 {
if m != nil {
return m.StartMs
}
return 0
}
func (m *ReadHints) GetEndMs() int64 {
if m != nil {
return m.EndMs
}
return 0
}
func (m *ReadHints) GetGrouping() []string {
if m != nil {
return m.Grouping
}
return nil
}
func (m *ReadHints) GetBy() bool {
if m != nil {
return m.By
}
return false
}
func (m *ReadHints) GetRangeMs() int64 {
if m != nil {
return m.RangeMs
}
return 0
}
// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
type Chunk struct {
MinTimeMs int64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"`
MaxTimeMs int64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"`
Type Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.Chunk_Encoding" json:"type,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Chunk) Reset() { *m = Chunk{} }
func (m *Chunk) String() string { return proto.CompactTextString(m) }
func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{10}
}
func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Chunk.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Chunk) XXX_Merge(src proto.Message) {
xxx_messageInfo_Chunk.Merge(m, src)
}
func (m *Chunk) XXX_Size() int {
return m.Size()
}
func (m *Chunk) XXX_DiscardUnknown() {
xxx_messageInfo_Chunk.DiscardUnknown(m)
}
var xxx_messageInfo_Chunk proto.InternalMessageInfo
func (m *Chunk) GetMinTimeMs() int64 {
if m != nil {
return m.MinTimeMs
}
return 0
}
func (m *Chunk) GetMaxTimeMs() int64 {
if m != nil {
return m.MaxTimeMs
}
return 0
}
func (m *Chunk) GetType() Chunk_Encoding {
if m != nil {
return m.Type
}
return Chunk_UNKNOWN
}
func (m *Chunk) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
// ChunkedSeries represents a single, encoded time series.
type ChunkedSeries struct {
// Labels should be sorted.
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
// Chunks will be in start time order and may overlap.
Chunks []Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} }
func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) }
func (*ChunkedSeries) ProtoMessage() {}
func (*ChunkedSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{11}
}
func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ChunkedSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ChunkedSeries.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ChunkedSeries) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChunkedSeries.Merge(m, src)
}
func (m *ChunkedSeries) XXX_Size() int {
return m.Size()
}
func (m *ChunkedSeries) XXX_DiscardUnknown() {
xxx_messageInfo_ChunkedSeries.DiscardUnknown(m)
}
var xxx_messageInfo_ChunkedSeries proto.InternalMessageInfo
func (m *ChunkedSeries) GetLabels() []Label {
if m != nil {
return m.Labels
}
return nil
}
func (m *ChunkedSeries) GetChunks() []Chunk {
if m != nil {
return m.Chunks
}
return nil
}
func init() {
proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value)
proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value)
proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value)
proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata")
proto.RegisterType((*Sample)(nil), "prometheus.Sample")
proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar")
proto.RegisterType((*Histogram)(nil), "prometheus.Histogram")
proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan")
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/remote.pb.go | prompb/remote.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: remote.proto
package prompb
import (
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ReadRequest_ResponseType int32
const (
// Server will return a single ReadResponse message with matched series that includes a list of raw samples.
// It's recommended to use streamed response types instead.
//
// Response headers:
// Content-Type: "application/x-protobuf"
// Content-Encoding: "snappy"
ReadRequest_SAMPLES ReadRequest_ResponseType = 0
// Server will stream delimited ChunkedReadResponse messages that
// contain XOR- or HISTOGRAM-encoded chunks for a single series.
// Each message is preceded by its varint-encoded size and a fixed-size
// big-endian uint32 CRC32 Castagnoli checksum of the data.
//
// Response headers:
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
// Content-Encoding: ""
ReadRequest_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1
)
var ReadRequest_ResponseType_name = map[int32]string{
0: "SAMPLES",
1: "STREAMED_XOR_CHUNKS",
}
var ReadRequest_ResponseType_value = map[string]int32{
"SAMPLES": 0,
"STREAMED_XOR_CHUNKS": 1,
}
func (x ReadRequest_ResponseType) String() string {
return proto.EnumName(ReadRequest_ResponseType_name, int32(x))
}
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{1, 0}
}
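// Illustrative sketch, not part of the generated code: one plausible way to
// read a single STREAMED_XOR_CHUNKS frame as described above, assuming the
// framing is a varint-encoded size, then a fixed-size big-endian uint32 CRC32
// Castagnoli checksum, then the payload. The helper name readChunkedFrame is
// hypothetical; assumed imports: bufio, encoding/binary, errors, hash/crc32, io.
//
//	func readChunkedFrame(r *bufio.Reader) ([]byte, error) {
//		// The frame length comes first, as an unsigned varint.
//		size, err := binary.ReadUvarint(r)
//		if err != nil {
//			return nil, err
//		}
//		// Then a 4-byte big-endian CRC32 (Castagnoli) of the payload.
//		var crcBuf [4]byte
//		if _, err := io.ReadFull(r, crcBuf[:]); err != nil {
//			return nil, err
//		}
//		// Finally the payload itself: a marshaled ChunkedReadResponse.
//		data := make([]byte, size)
//		if _, err := io.ReadFull(r, data); err != nil {
//			return nil, err
//		}
//		if crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)) != binary.BigEndian.Uint32(crcBuf[:]) {
//			return nil, errors.New("chunked frame: CRC32 checksum mismatch")
//		}
//		return data, nil
//	}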
type WriteRequest struct {
Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
Metadata []MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WriteRequest) Reset() { *m = WriteRequest{} }
func (m *WriteRequest) String() string { return proto.CompactTextString(m) }
func (*WriteRequest) ProtoMessage() {}
func (*WriteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{0}
}
func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *WriteRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_WriteRequest.Merge(m, src)
}
func (m *WriteRequest) XXX_Size() int {
return m.Size()
}
func (m *WriteRequest) XXX_DiscardUnknown() {
xxx_messageInfo_WriteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_WriteRequest proto.InternalMessageInfo
func (m *WriteRequest) GetTimeseries() []TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
func (m *WriteRequest) GetMetadata() []MetricMetadata {
if m != nil {
return m.Metadata
}
return nil
}
// ReadRequest represents a remote read request.
type ReadRequest struct {
Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
// accepted_response_types allows negotiating the content type of the response.
//
// Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
// implemented by the server, an error is returned.
// For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{1}
}
func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ReadRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReadRequest.Merge(m, src)
}
func (m *ReadRequest) XXX_Size() int {
return m.Size()
}
func (m *ReadRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReadRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReadRequest proto.InternalMessageInfo
func (m *ReadRequest) GetQueries() []*Query {
if m != nil {
return m.Queries
}
return nil
}
func (m *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType {
if m != nil {
return m.AcceptedResponseTypes
}
return nil
}
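// Illustrative usage with hypothetical values: building a ReadRequest that
// prefers the streamed response type but still accepts SAMPLES, matching the
// FIFO negotiation described on the accepted_response_types field above.
//
//	req := &ReadRequest{
//		Queries: []*Query{{
//			StartTimestampMs: 0,
//			EndTimestampMs:   3600000, // one hour, in milliseconds
//		}},
//		AcceptedResponseTypes: []ReadRequest_ResponseType{
//			ReadRequest_STREAMED_XOR_CHUNKS, // preferred
//			ReadRequest_SAMPLES,             // fallback
//		},
//	}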
// ReadResponse is a response when response_type equals SAMPLES.
type ReadResponse struct {
// In same order as the request's queries.
Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2}
}
func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ReadResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReadResponse.Merge(m, src)
}
func (m *ReadResponse) XXX_Size() int {
return m.Size()
}
func (m *ReadResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ReadResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ReadResponse proto.InternalMessageInfo
func (m *ReadResponse) GetResults() []*QueryResult {
if m != nil {
return m.Results
}
return nil
}
type Query struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"`
Hints *ReadHints `protobuf:"bytes,4,opt,name=hints,proto3" json:"hints,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{3}
}
func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Query.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Query) XXX_Merge(src proto.Message) {
xxx_messageInfo_Query.Merge(m, src)
}
func (m *Query) XXX_Size() int {
return m.Size()
}
func (m *Query) XXX_DiscardUnknown() {
xxx_messageInfo_Query.DiscardUnknown(m)
}
var xxx_messageInfo_Query proto.InternalMessageInfo
func (m *Query) GetStartTimestampMs() int64 {
if m != nil {
return m.StartTimestampMs
}
return 0
}
func (m *Query) GetEndTimestampMs() int64 {
if m != nil {
return m.EndTimestampMs
}
return 0
}
func (m *Query) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
func (m *Query) GetHints() *ReadHints {
if m != nil {
return m.Hints
}
return nil
}
type QueryResult struct {
// Samples within a time series must be ordered by time.
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{4}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryResult) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryResult.Merge(m, src)
}
func (m *QueryResult) XXX_Size() int {
return m.Size()
}
func (m *QueryResult) XXX_DiscardUnknown() {
xxx_messageInfo_QueryResult.DiscardUnknown(m)
}
var xxx_messageInfo_QueryResult proto.InternalMessageInfo
func (m *QueryResult) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series one after another, optionally split by time. This means that a single frame can
// contain a partition of a single series, but once a new series starts to be streamed, no more chunks will be
// sent for the previous one. Series are returned sorted in the same way TSDB blocks are internally.
type ChunkedReadResponse struct {
ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"`
// query_index represents the index of the query from ReadRequest.queries that these chunks relate to.
QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{5}
}
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ChunkedReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ChunkedReadResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ChunkedReadResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChunkedReadResponse.Merge(m, src)
}
func (m *ChunkedReadResponse) XXX_Size() int {
return m.Size()
}
func (m *ChunkedReadResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ChunkedReadResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ChunkedReadResponse proto.InternalMessageInfo
func (m *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries {
if m != nil {
return m.ChunkedSeries
}
return nil
}
func (m *ChunkedReadResponse) GetQueryIndex() int64 {
if m != nil {
return m.QueryIndex
}
return 0
}
func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
proto.RegisterType((*Query)(nil), "prometheus.Query")
proto.RegisterType((*QueryResult)(nil), "prometheus.QueryResult")
proto.RegisterType((*ChunkedReadResponse)(nil), "prometheus.ChunkedReadResponse")
}
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{
// 496 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40,
0x10, 0xee, 0x26, 0x69, 0x13, 0x8d, 0x43, 0x14, 0xb6, 0x2d, 0x09, 0x39, 0xa4, 0x91, 0xc5, 0x21,
0x52, 0x51, 0x10, 0xa1, 0xe2, 0xd4, 0x03, 0x69, 0x89, 0x54, 0xa0, 0xe6, 0x67, 0x13, 0x04, 0x42,
0x48, 0xd6, 0xc6, 0x1e, 0x35, 0x16, 0xf5, 0x4f, 0x77, 0xd7, 0x52, 0xf3, 0x16, 0x3c, 0x13, 0xa7,
0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x2d, 0x5c, 0xb8, 0xad, 0xbf, 0x3f,
0xcf, 0xcc, 0xce, 0x42, 0x53, 0x60, 0x18, 0x2b, 0x1c, 0x25, 0x22, 0x56, 0x31, 0x85, 0x44, 0xc4,
0x21, 0xaa, 0x25, 0xa6, 0xb2, 0x67, 0xa9, 0x55, 0x82, 0x32, 0x27, 0x7a, 0x7b, 0x17, 0xf1, 0x45,
0xac, 0x8f, 0x8f, 0xb2, 0x53, 0x8e, 0xda, 0x5f, 0x09, 0x34, 0x3f, 0x88, 0x40, 0x21, 0xc3, 0xab,
0x14, 0xa5, 0xa2, 0xc7, 0x00, 0x2a, 0x08, 0x51, 0xa2, 0x08, 0x50, 0x76, 0xc9, 0xa0, 0x3a, 0xb4,
0xc6, 0xf7, 0x46, 0x7f, 0x42, 0x47, 0xf3, 0x20, 0xc4, 0x99, 0x66, 0x4f, 0x6a, 0x37, 0x3f, 0x0f,
0xb6, 0x98, 0xa1, 0xa7, 0xc7, 0xd0, 0x08, 0x51, 0x71, 0x9f, 0x2b, 0xde, 0xad, 0x6a, 0x6f, 0xcf,
0xf4, 0x3a, 0xa8, 0x44, 0xe0, 0x39, 0x85, 0xa2, 0xf0, 0x6f, 0x1c, 0x2f, 0x6b, 0x8d, 0x4a, 0xbb,
0x6a, 0x7f, 0x27, 0x60, 0x31, 0xe4, 0x7e, 0x59, 0xd1, 0x21, 0xd4, 0xaf, 0x52, 0xb3, 0x9c, 0xbb,
0x66, 0xe4, 0xbb, 0x14, 0xc5, 0x8a, 0x95, 0x0a, 0xfa, 0x19, 0x3a, 0xdc, 0xf3, 0x30, 0x51, 0xe8,
0xbb, 0x02, 0x65, 0x12, 0x47, 0x12, 0x5d, 0x3d, 0x86, 0x6e, 0x65, 0x50, 0x1d, 0xb6, 0xc6, 0x0f,
0x4c, 0xb3, 0xf1, 0x9b, 0x11, 0x2b, 0xd4, 0xf3, 0x55, 0x82, 0x6c, 0xbf, 0x0c, 0x31, 0x51, 0x69,
0x1f, 0x41, 0xd3, 0x04, 0xa8, 0x05, 0xf5, 0xd9, 0xc4, 0x79, 0x7b, 0x3e, 0x9d, 0xb5, 0xb7, 0x68,
0x07, 0x76, 0x67, 0x73, 0x36, 0x9d, 0x38, 0xd3, 0xe7, 0xee, 0xc7, 0x37, 0xcc, 0x3d, 0x3d, 0x7b,
0xff, 0xfa, 0xd5, 0xac, 0x4d, 0xec, 0x49, 0xe6, 0xe2, 0x9b, 0x28, 0xfa, 0x18, 0xea, 0x02, 0x65,
0x7a, 0xa9, 0xca, 0x86, 0x3a, 0xff, 0x36, 0xa4, 0x79, 0x56, 0xea, 0xec, 0x6f, 0x04, 0xb6, 0x35,
0x41, 0x1f, 0x02, 0x95, 0x8a, 0x0b, 0xe5, 0xea, 0xa9, 0x2b, 0x1e, 0x26, 0x6e, 0x98, 0xe5, 0x90,
0x61, 0x95, 0xb5, 0x35, 0x33, 0x2f, 0x09, 0x47, 0xd2, 0x21, 0xb4, 0x31, 0xf2, 0x6f, 0x6b, 0x2b,
0x5a, 0xdb, 0xc2, 0xc8, 0x37, 0x95, 0x47, 0xd0, 0x08, 0xb9, 0xf2, 0x96, 0x28, 0x64, 0x71, 0x73,
0x5d, 0xb3, 0xaa, 0x73, 0xbe, 0xc0, 0x4b, 0x27, 0x17, 0xb0, 0x8d, 0x92, 0x1e, 0xc2, 0xf6, 0x32,
0x88, 0x94, 0xec, 0xd6, 0x06, 0x64, 0x68, 0x8d, 0xf7, 0xff, 0x1e, 0xee, 0x59, 0x46, 0xb2, 0x5c,
0x63, 0x4f, 0xc1, 0x32, 0x9a, 0xa3, 0x4f, 0xff, 0x7f, 0xd3, 0xcc, 0x1d, 0xb3, 0xaf, 0x61, 0xf7,
0x74, 0x99, 0x46, 0x5f, 0xb2, 0xcb, 0x31, 0xa6, 0xfa, 0x0c, 0x5a, 0x5e, 0x0e, 0xbb, 0xb7, 0x22,
0xef, 0x9b, 0x91, 0x85, 0xb1, 0x48, 0xbd, 0xe3, 0x99, 0x9f, 0xf4, 0x00, 0xac, 0x6c, 0x8d, 0x56,
0x6e, 0x10, 0xf9, 0x78, 0x5d, 0xcc, 0x09, 0x34, 0xf4, 0x22, 0x43, 0x4e, 0xf6, 0x6e, 0xd6, 0x7d,
0xf2, 0x63, 0xdd, 0x27, 0xbf, 0xd6, 0x7d, 0xf2, 0x69, 0x27, 0xcb, 0x4d, 0x16, 0x8b, 0x1d, 0xfd,
0x92, 0x9e, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x13, 0x18, 0x12, 0x0a, 0x88, 0x03, 0x00, 0x00,
}
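// NOTE: the gogoproto-generated MarshalToSizedBuffer methods below fill the
// buffer from the end towards the start. Fields are processed in descending
// field-number order so the final output has them in ascending order, and
// each length prefix is written only after its payload has been sized,
// avoiding a second sizing pass. The literal assigned after each field is its
// protobuf tag byte, (field_number << 3) | wire_type: for example, 0xa is
// field 1 with wire type 2 (length-delimited) and 0x1a is field 3 with wire
// type 2.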
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Metadata) > 0 {
for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Timeseries) > 0 {
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ReadRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.AcceptedResponseTypes) > 0 {
dAtA2 := make([]byte, len(m.AcceptedResponseTypes)*10)
var j1 int
for _, num := range m.AcceptedResponseTypes {
for num >= 1<<7 {
dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j1++
}
dAtA2[j1] = uint8(num)
j1++
}
i -= j1
copy(dAtA[i:], dAtA2[:j1])
i = encodeVarintRemote(dAtA, i, uint64(j1))
i--
dAtA[i] = 0x12
}
if len(m.Queries) > 0 {
for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ReadResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Results) > 0 {
for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *Query) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Query) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Query) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Hints != nil {
{
size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
if len(m.Matchers) > 0 {
for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if m.EndTimestampMs != 0 {
i = encodeVarintRemote(dAtA, i, uint64(m.EndTimestampMs))
i--
dAtA[i] = 0x10
}
if m.StartTimestampMs != 0 {
i = encodeVarintRemote(dAtA, i, uint64(m.StartTimestampMs))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *QueryResult) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Timeseries) > 0 {
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ChunkedReadResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ChunkedReadResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ChunkedReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.QueryIndex != 0 {
i = encodeVarintRemote(dAtA, i, uint64(m.QueryIndex))
i--
dAtA[i] = 0x10
}
if len(m.ChunkedSeries) > 0 {
for iNdEx := len(m.ChunkedSeries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ChunkedSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintRemote(dAtA []byte, offset int, v uint64) int {
offset -= sovRemote(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
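// Worked example: encodeVarintRemote(dAtA, offset, 300) writes the two bytes
// 0xac, 0x02 (base-128 varint, least-significant group first, high bit set as
// a continuation flag) into dAtA[offset-2:offset] and returns the new start
// offset, offset-2.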
func (m *WriteRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Timeseries) > 0 {
for _, e := range m.Timeseries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if len(m.Metadata) > 0 {
for _, e := range m.Metadata {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ReadRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Queries) > 0 {
for _, e := range m.Queries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if len(m.AcceptedResponseTypes) > 0 {
l = 0
for _, e := range m.AcceptedResponseTypes {
l += sovRemote(uint64(e))
}
n += 1 + sovRemote(uint64(l)) + l
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ReadResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Results) > 0 {
for _, e := range m.Results {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Query) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.StartTimestampMs != 0 {
n += 1 + sovRemote(uint64(m.StartTimestampMs))
}
if m.EndTimestampMs != 0 {
n += 1 + sovRemote(uint64(m.EndTimestampMs))
}
if len(m.Matchers) > 0 {
for _, e := range m.Matchers {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if m.Hints != nil {
l = m.Hints.Size()
n += 1 + l + sovRemote(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *QueryResult) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Timeseries) > 0 {
for _, e := range m.Timeseries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ChunkedReadResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ChunkedSeries) > 0 {
for _, e := range m.ChunkedSeries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if m.QueryIndex != 0 {
n += 1 + sovRemote(uint64(m.QueryIndex))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovRemote(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozRemote(x uint64) (n int) {
return sovRemote(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
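// Worked examples: sovRemote(127) == 1 and sovRemote(300) == 2, since 300
// needs two base-128 groups. sozRemote sizes a zigzag-encoded (sint64-style)
// value, mapping n to (n << 1) ^ (n >> 63) so that small negative numbers
// stay small: -1 zigzags to 1 and takes one byte.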
func (m *WriteRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Timeseries = append(m.Timeseries, TimeSeries{})
if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Metadata = append(m.Metadata, MetricMetadata{})
if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRemote(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ReadRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Queries = append(m.Queries, &Query{})
if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType == 0 {
var v ReadRequest_ResponseType
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= ReadRequest_ResponseType(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
if elementCount != 0 && len(m.AcceptedResponseTypes) == 0 {
m.AcceptedResponseTypes = make([]ReadRequest_ResponseType, 0, elementCount)
}
for iNdEx < postIndex {
var v ReadRequest_ResponseType
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= ReadRequest_ResponseType(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field AcceptedResponseTypes", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipRemote(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ReadResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/codec.go | prompb/codec.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prompb
import (
"strings"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
)
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
// ToLabels returns model labels.Labels from the timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
return labelProtosToLabels(b, m.GetLabels())
}
// ToLabels returns model labels.Labels from the chunked series' remote labels.
func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
return labelProtosToLabels(b, m.GetLabels())
}
func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
b.Reset()
for _, l := range labelPairs {
b.Add(l.Name, l.Value)
}
b.Sort()
return b.Labels()
}
// FromLabels transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func FromLabels(lbls labels.Labels, buf []Label) []Label {
result := buf[:0]
lbls.Range(func(l labels.Label) {
result = append(result, Label{
Name: l.Name,
Value: l.Value,
})
})
return result
}
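// Illustrative usage with hypothetical variables: because FromLabels appends
// into buf[:0], the same backing array can be reused across series to avoid
// per-series allocations, as long as the previous result is no longer needed.
//
//	var buf []Label
//	for _, lbls := range allSeriesLabels { // allSeriesLabels: []labels.Labels
//		buf = FromLabels(lbls, buf)
//		// ... use buf before the next iteration overwrites it ...
//	}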
// FromMetadataType transforms a Prometheus metricType into a prompb metricType. Since the former is a string, we need to transform it to an enum.
func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
mt := strings.ToUpper(string(t))
v, ok := MetricMetadata_MetricType_value[mt]
if !ok {
return MetricMetadata_UNKNOWN
}
return MetricMetadata_MetricType(v)
}
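// For example, FromMetadataType(model.MetricTypeCounter) yields
// MetricMetadata_COUNTER via the uppercase lookup "COUNTER", while an
// unrecognized string such as "not-known" falls back to MetricMetadata_UNKNOWN.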
// IsFloatHistogram returns true if the histogram is a float histogram.
func (h Histogram) IsFloatHistogram() bool {
_, ok := h.GetCount().(*Histogram_CountFloat)
return ok
}
// ToIntHistogram returns an integer Prometheus histogram from the remote implementation
// of an integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
if h.IsFloatHistogram() {
return nil
}
return &histogram.Histogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountInt(),
Count: h.GetCountInt(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveDeltas(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeDeltas(),
CustomValues: h.CustomValues,
}
}
// ToFloatHistogram returns a float Prometheus histogram from the remote implementation
// of a float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
if h.IsFloatHistogram() {
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountFloat(),
Count: h.GetCountFloat(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeCounts(),
CustomValues: h.CustomValues, // CustomValues are immutable.
}
}
// Conversion from integer histogram.
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: float64(h.GetZeroCountInt()),
Count: float64(h.GetCountInt()),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
CustomValues: h.CustomValues, // CustomValues are immutable.
}
}
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func deltasToCounts(deltas []int64) []float64 {
counts := make([]float64, len(deltas))
var cur float64
for i, d := range deltas {
cur += float64(d)
counts[i] = cur
}
return counts
}
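// Worked example: deltasToCounts([]int64{1, 2, -2, 1}) returns
// []float64{1, 3, 1, 2}. Each output is the running (prefix) sum of the
// deltas, which is how integer histogram buckets are converted to the
// absolute counts used by float histograms.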
// FromIntHistogram returns a remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
return Histogram{
Count: &Histogram_CountInt{CountInt: h.Count},
Sum: h.Sum,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
NegativeSpans: spansToSpansProto(h.NegativeSpans),
NegativeDeltas: h.NegativeBuckets,
PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets,
ResetHint: Histogram_ResetHint(h.CounterResetHint),
Timestamp: timestamp,
CustomValues: h.CustomValues, // CustomValues are immutable.
}
}
// FromFloatHistogram returns a remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
return Histogram{
Count: &Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
Timestamp: timestamp,
CustomValues: fh.CustomValues, // CustomValues are immutable.
}
}
func spansToSpansProto(s []histogram.Span) []BucketSpan {
spans := make([]BucketSpan, len(s))
for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
// ToExemplar converts a remote exemplar to a model exemplar.
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar {
timestamp := m.Timestamp
return exemplar.Exemplar{
Labels: labelProtosToLabels(b, m.GetLabels()),
Value: m.Value,
Ts: timestamp,
HasTs: timestamp != 0,
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/rwcommon/codec_test.go | prompb/rwcommon/codec_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rwcommon
import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)
func TestToLabels(t *testing.T) {
expected := labels.FromStrings("__name__", "metric1", "foo", "bar")
t.Run("v1", func(t *testing.T) {
ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}}
b := labels.NewScratchBuilder(2)
require.Equal(t, expected, ts.ToLabels(&b, nil))
require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil))
require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels))
})
t.Run("v2", func(t *testing.T) {
v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"}
ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}}
b := labels.NewScratchBuilder(2)
result, err := ts.ToLabels(&b, v2Symbols)
require.NoError(t, err)
require.Equal(t, expected, result)
// No need for FromLabels in our prod code, as we use the symbol table to do so.
})
}
func TestFromMetadataType(t *testing.T) {
for _, tc := range []struct {
desc string
input model.MetricType
expectedV1 prompb.MetricMetadata_MetricType
expectedV2 writev2.Metadata_MetricType
}{
{
desc: "with a single-word metric",
input: model.MetricTypeCounter,
expectedV1: prompb.MetricMetadata_COUNTER,
expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER,
},
{
desc: "with a two-word metric",
input: model.MetricTypeStateset,
expectedV1: prompb.MetricMetadata_STATESET,
expectedV2: writev2.Metadata_METRIC_TYPE_STATESET,
},
{
desc: "with an unknown metric",
input: "not-known",
expectedV1: prompb.MetricMetadata_UNKNOWN,
expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED,
},
} {
t.Run(tc.desc, func(t *testing.T) {
t.Run("v1", func(t *testing.T) {
require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input))
})
t.Run("v2", func(t *testing.T) {
require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input))
})
})
}
}
func TestToMetadata(t *testing.T) {
sym := writev2.NewSymbolTable()
for _, tc := range []struct {
input writev2.Metadata
expected metadata.Metadata
}{
{
input: writev2.Metadata{},
expected: metadata.Metadata{
Type: model.MetricTypeUnknown,
},
},
{
input: writev2.Metadata{
Type: 12414, // Unknown.
},
expected: metadata.Metadata{
Type: model.MetricTypeUnknown,
},
},
{
input: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER,
HelpRef: sym.Symbolize("help1"),
UnitRef: sym.Symbolize("unit1"),
},
expected: metadata.Metadata{
Type: model.MetricTypeCounter,
Help: "help1",
Unit: "unit1",
},
},
{
input: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_STATESET,
HelpRef: sym.Symbolize("help2"),
},
expected: metadata.Metadata{
Type: model.MetricTypeStateset,
Help: "help2",
},
},
} {
t.Run("", func(t *testing.T) {
ts := writev2.TimeSeries{Metadata: tc.input}
require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols()))
})
}
}
func TestToHistogram_Empty(t *testing.T) {
t.Run("v1", func(t *testing.T) {
require.NotNil(t, prompb.Histogram{}.ToIntHistogram())
require.NotNil(t, prompb.Histogram{}.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
require.NotNil(t, writev2.Histogram{}.ToIntHistogram())
require.NotNil(t, writev2.Histogram{}.ToFloatHistogram())
})
}
// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testIntHistogram() histogram.Histogram {
return histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Schema: 1,
Count: 19,
Sum: 2.7,
ZeroThreshold: 1e-128,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 5},
{Offset: 1, Length: 0},
{Offset: 0, Length: 1},
},
NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
CustomValues: []float64{21421, 523},
}
}
// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testFloatHistogram() histogram.FloatHistogram {
return histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 1,
Count: 19,
Sum: 2.7,
ZeroThreshold: 1e-128,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 5},
{Offset: 1, Length: 0},
{Offset: 0, Length: 1},
},
NegativeBuckets: []float64{1, 3, 1, 2, 1, 1},
CustomValues: []float64{21421, 523},
}
}
func TestFromIntToFloatOrIntHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
testIntHist := testIntHistogram()
testFloatHist := testFloatHistogram()
h := prompb.FromIntHistogram(123, &testIntHist)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHist, *h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testIntHist := testIntHistogram()
testFloatHist := testFloatHistogram()
h := writev2.FromIntHistogram(123, &testIntHist)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHist, *h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
}
func TestFromFloatToFloatHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
testFloatHist := testFloatHistogram()
h := prompb.FromFloatHistogram(123, &testFloatHist)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testFloatHist := testFloatHistogram()
h := writev2.FromFloatHistogram(123, &testFloatHist)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
}
func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) {
for _, tc := range []struct {
input histogram.CounterResetHint
expectedV1 prompb.Histogram_ResetHint
expectedV2 writev2.Histogram_ResetHint
}{
{
input: histogram.UnknownCounterReset,
expectedV1: prompb.Histogram_UNKNOWN,
expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED,
},
{
input: histogram.CounterReset,
expectedV1: prompb.Histogram_YES,
expectedV2: writev2.Histogram_RESET_HINT_YES,
},
{
input: histogram.NotCounterReset,
expectedV1: prompb.Histogram_NO,
expectedV2: writev2.Histogram_RESET_HINT_NO,
},
{
input: histogram.GaugeType,
expectedV1: prompb.Histogram_GAUGE,
expectedV2: writev2.Histogram_RESET_HINT_GAUGE,
},
} {
t.Run("", func(t *testing.T) {
t.Run("v1", func(t *testing.T) {
h := testIntHistogram()
h.CounterResetHint = tc.input
got := prompb.FromIntHistogram(1337, &h)
require.Equal(t, tc.expectedV1, got.GetResetHint())
fh := testFloatHistogram()
fh.CounterResetHint = tc.input
got2 := prompb.FromFloatHistogram(1337, &fh)
require.Equal(t, tc.expectedV1, got2.GetResetHint())
})
t.Run("v2", func(t *testing.T) {
h := testIntHistogram()
h.CounterResetHint = tc.input
got := writev2.FromIntHistogram(1337, &h)
require.Equal(t, tc.expectedV2, got.GetResetHint())
fh := testFloatHistogram()
fh.CounterResetHint = tc.input
got2 := writev2.FromFloatHistogram(1337, &fh)
require.Equal(t, tc.expectedV2, got2.GetResetHint())
})
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/types_test.go | prompb/io/prometheus/write/v2/types_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/prompb"
)
func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) {
expectedV1Empty := &prompb.WriteRequest{}
for _, tc := range []struct{ incoming *Request }{
{
incoming: &Request{}, // Technically wrong, should be at least empty string in symbol.
},
{
incoming: &Request{
Symbols: []string{""},
}, // NOTE: Without reserved fields, this failed with a "corrupted" ghost TimeSeries element.
},
{
incoming: &Request{
Symbols: []string{"", "__name__", "metric1"},
Timeseries: []TimeSeries{
{LabelsRefs: []uint32{1, 2}},
{Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}},
}, // NOTE: Without reserved fields, this failed with "proto: illegal wireType 7".
},
},
} {
t.Run("", func(t *testing.T) {
in, err := proto.Marshal(tc.incoming)
require.NoError(t, err)
// Test accidental unmarshal of v2 payload with v1 proto.
out := &prompb.WriteRequest{}
require.NoError(t, proto.Unmarshal(in, out))
// Drop unknowns, we expect them when incoming payload had some fields.
// This field & method will be likely gone after gogo removal.
out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
require.Equal(t, expectedV1Empty, out)
})
}
}
func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) {
expectedV2Empty := &Request{}
for _, tc := range []struct{ incoming *prompb.WriteRequest }{
{
incoming: &prompb.WriteRequest{},
},
{
incoming: &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}},
Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}},
},
},
},
// NOTE: Without reserved fields, results in corrupted v2.Request.Symbols.
},
} {
t.Run("", func(t *testing.T) {
in, err := proto.Marshal(tc.incoming)
require.NoError(t, err)
// Test accidental unmarshal of v1 payload with v2 proto.
out := &Request{}
require.NoError(t, proto.Unmarshal(in, out))
// Drop unknowns, we expect them when incoming payload had some fields.
// This field & method will be likely gone after gogo removal.
out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
require.Equal(t, expectedV2Empty, out)
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/custom.go | prompb/io/prometheus/write/v2/custom.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"slices"
)
func (m Sample) T() int64 { return m.Timestamp }
func (m Sample) V() float64 { return m.Value }
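// OptimizedMarshal marshals the Request into dst, reusing dst's capacity when
// it is large enough, and returns the encoded bytes. A minimal usage sketch
// (the buf variable is illustrative, not part of this package):
//
//	buf, err := req.OptimizedMarshal(nil) // first call allocates
//	...
//	buf, err = req.OptimizedMarshal(buf) // later calls reuse buf's capacity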
func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) {
siz := m.Size()
if cap(dst) < siz {
dst = make([]byte, siz)
}
n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz])
if err != nil {
return nil, err
}
return dst[:n], nil
}
// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but calls OptimizedMarshalToSizedBuffer on the timeseries.
func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Timeseries) > 0 {
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
}
if len(m.Symbols) > 0 {
for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Symbols[iNdEx])
copy(dAtA[i:], m.Symbols[iNdEx])
i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
i--
dAtA[i] = 0x22
}
}
return len(dAtA) - i, nil
}
// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but marshals m.LabelsRefs in place without extra allocations.
func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
if len(m.Histograms) > 0 {
for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Exemplars) > 0 {
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
}
if len(m.Samples) > 0 {
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.LabelsRefs) > 0 {
// This is the trick: encode the varints in reverse order to make it easier
// to do it in place. Then reverse the whole thing.
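		// Each varint is written least-significant 7-bit group first while the
		// buffer fills from the end, so the packed bytes land in reverse; the
		// slices.Reverse below restores proper wire order in one pass.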
var j10 int
start := i
for _, num := range m.LabelsRefs {
for num >= 1<<7 {
dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
i--
j10++
}
dAtA[i-1] = uint8(num)
i--
j10++
}
slices.Reverse(dAtA[i:start])
// --- end of trick
i = encodeVarintTypes(dAtA, i, uint64(j10))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/types.pb.go | prompb/io/prometheus/write/v2/types.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: io/prometheus/write/v2/types.proto
package writev2
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Metadata_MetricType int32
const (
Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0
Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1
Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2
Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3
Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4
Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5
Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6
Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7
)
var Metadata_MetricType_name = map[int32]string{
0: "METRIC_TYPE_UNSPECIFIED",
1: "METRIC_TYPE_COUNTER",
2: "METRIC_TYPE_GAUGE",
3: "METRIC_TYPE_HISTOGRAM",
4: "METRIC_TYPE_GAUGEHISTOGRAM",
5: "METRIC_TYPE_SUMMARY",
6: "METRIC_TYPE_INFO",
7: "METRIC_TYPE_STATESET",
}
var Metadata_MetricType_value = map[string]int32{
"METRIC_TYPE_UNSPECIFIED": 0,
"METRIC_TYPE_COUNTER": 1,
"METRIC_TYPE_GAUGE": 2,
"METRIC_TYPE_HISTOGRAM": 3,
"METRIC_TYPE_GAUGEHISTOGRAM": 4,
"METRIC_TYPE_SUMMARY": 5,
"METRIC_TYPE_INFO": 6,
"METRIC_TYPE_STATESET": 7,
}
func (x Metadata_MetricType) String() string {
return proto.EnumName(Metadata_MetricType_name, int32(x))
}
func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{4, 0}
}
type Histogram_ResetHint int32
const (
Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0
Histogram_RESET_HINT_YES Histogram_ResetHint = 1
Histogram_RESET_HINT_NO Histogram_ResetHint = 2
Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3
)
var Histogram_ResetHint_name = map[int32]string{
0: "RESET_HINT_UNSPECIFIED",
1: "RESET_HINT_YES",
2: "RESET_HINT_NO",
3: "RESET_HINT_GAUGE",
}
var Histogram_ResetHint_value = map[string]int32{
"RESET_HINT_UNSPECIFIED": 0,
"RESET_HINT_YES": 1,
"RESET_HINT_NO": 2,
"RESET_HINT_GAUGE": 3,
}
func (x Histogram_ResetHint) String() string {
return proto.EnumName(Histogram_ResetHint_name, int32(x))
}
func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{5, 0}
}
// Request represents a request to write the given timeseries to a remote destination.
// This message was introduced in the Remote Write 2.0 specification:
// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
//
// The canonical Content-Type request header value for this message is
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
//
// Version: v2.0-rc.4
//
// NOTE: gogoproto options might change in future for this file, they
// are not part of the spec proto (they only modify the generated Go code, not
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
type Request struct {
// symbols contains a de-duplicated array of string elements used for various
// items in a Request message, like labels and metadata items. For the sender's convenience
	// around empty values for optional fields like unit_ref, the symbols array MUST start with
	// an empty string.
//
	// To decode each of the symbolized strings, referenced by a "ref(s)" suffix, you
	// need to look up the actual string by index in the symbols array. The order of
	// strings is up to the sender. The receiver should not assume any particular encoding.
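	//
	// For example (illustrative values): with symbols ["", "__name__", "up", "job", "a"],
	// labels_refs [1, 2, 3, 4] in a timeseries denote the labels {__name__="up", job="a"}.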
Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"`
// timeseries represents an array of distinct series with 0 or more samples.
Timeseries []TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(m, src)
}
func (m *Request) XXX_Size() int {
return m.Size()
}
func (m *Request) XXX_DiscardUnknown() {
xxx_messageInfo_Request.DiscardUnknown(m)
}
var xxx_messageInfo_Request proto.InternalMessageInfo
func (m *Request) GetSymbols() []string {
if m != nil {
return m.Symbols
}
return nil
}
func (m *Request) GetTimeseries() []TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
// TimeSeries represents a single series.
type TimeSeries struct {
// labels_refs is a list of label name-value pair references, encoded
// as indices to the Request.symbols array. This list's length is always
// a multiple of two, and the underlying labels should be sorted lexicographically.
//
// Note that there might be multiple TimeSeries objects in the same
	// Request with the same labels, e.g. for different exemplars, metadata
// or start timestamp.
LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
	// Timeseries messages can specify either samples or (native) histogram samples
// (histogram field), but not both. For a typical sender (real-time metric
// streaming), in healthy cases, there will be only one sample or histogram.
//
// Samples and histograms are sorted by timestamp (older first).
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"`
// exemplars represents an optional set of exemplars attached to this series' samples.
Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
// metadata represents the metadata associated with the given series' samples.
Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{1}
}
func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *TimeSeries) XXX_Merge(src proto.Message) {
xxx_messageInfo_TimeSeries.Merge(m, src)
}
func (m *TimeSeries) XXX_Size() int {
return m.Size()
}
func (m *TimeSeries) XXX_DiscardUnknown() {
xxx_messageInfo_TimeSeries.DiscardUnknown(m)
}
var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
func (m *TimeSeries) GetLabelsRefs() []uint32 {
if m != nil {
return m.LabelsRefs
}
return nil
}
func (m *TimeSeries) GetSamples() []Sample {
if m != nil {
return m.Samples
}
return nil
}
func (m *TimeSeries) GetHistograms() []Histogram {
if m != nil {
return m.Histograms
}
return nil
}
func (m *TimeSeries) GetExemplars() []Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
func (m *TimeSeries) GetMetadata() Metadata {
if m != nil {
return m.Metadata
}
return Metadata{}
}
// Exemplar is additional information attached to some series' samples.
// It is typically used to attach an example trace or request ID associated with
// the metric changes.
type Exemplar struct {
// labels_refs is an optional list of label name-value pair references, encoded
	// as indices to the Request.symbols array. This list's length is always
	// a multiple of two, and the underlying labels should be sorted lexicographically.
// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
	// for conversion between time.Time and the Prometheus timestamp.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{2}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return m.Size()
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabelsRefs() []uint32 {
if m != nil {
return m.LabelsRefs
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
// Sample represents a series sample.
type Sample struct {
// value of the sample.
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
	// timestamp represents the timestamp of the sample in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
	// for conversion between time.Time and the Prometheus timestamp.
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// start_timestamp represents an optional start timestamp for the sample,
// in ms format. This information is typically used for counter, histogram (cumulative)
// or delta type metrics.
//
// For cumulative metrics, the start timestamp represents the time when the
// counter started counting (sometimes referred to as start timestamp), which
// can increase the accuracy of certain processing and query semantics (e.g. rates).
//
// Note:
	// * Some receivers might require start timestamps for certain metric
	//   types and may reject such samples within the Request as a result.
	// * The start timestamp is the same as the "created timestamp" name Prometheus used in the past.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
	// for conversion between time.Time and the Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to efficiency and consistency.
	// A zero value means the value is not set. If you need to use exactly the zero value for
// the timestamp, use 1 millisecond before or after.
StartTimestamp int64 `protobuf:"varint,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{3}
}
func (m *Sample) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Sample) XXX_Merge(src proto.Message) {
xxx_messageInfo_Sample.Merge(m, src)
}
func (m *Sample) XXX_Size() int {
return m.Size()
}
func (m *Sample) XXX_DiscardUnknown() {
xxx_messageInfo_Sample.DiscardUnknown(m)
}
var xxx_messageInfo_Sample proto.InternalMessageInfo
func (m *Sample) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Sample) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *Sample) GetStartTimestamp() int64 {
if m != nil {
return m.StartTimestamp
}
return 0
}
// Metadata represents the metadata associated with the given series' samples.
type Metadata struct {
Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"`
// help_ref is a reference to the Request.symbols array representing help
	// text for the metric. Help is optional; in that case, the reference should
	// point to an empty string.
HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"`
// unit_ref is a reference to the Request.symbols array representing a unit
	// for the metric. Unit is optional; in that case, the reference should
	// point to an empty string.
UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {}
func (*Metadata) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{4}
}
func (m *Metadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Metadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metadata.Merge(m, src)
}
func (m *Metadata) XXX_Size() int {
return m.Size()
}
func (m *Metadata) XXX_DiscardUnknown() {
xxx_messageInfo_Metadata.DiscardUnknown(m)
}
var xxx_messageInfo_Metadata proto.InternalMessageInfo
func (m *Metadata) GetType() Metadata_MetricType {
if m != nil {
return m.Type
}
return Metadata_METRIC_TYPE_UNSPECIFIED
}
func (m *Metadata) GetHelpRef() uint32 {
if m != nil {
return m.HelpRef
}
return 0
}
func (m *Metadata) GetUnitRef() uint32 {
if m != nil {
return m.UnitRef
}
return 0
}
// A native histogram message, supporting
// * sparse exponential bucketing and custom bucketing,
// * float or integer histograms.
//
// See the full spec: https://prometheus.io/docs/specs/native_histograms/
type Histogram struct {
// Types that are valid to be assigned to Count:
//
// *Histogram_CountInt
// *Histogram_CountFloat
Count isHistogram_Count `protobuf_oneof:"count"`
Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
// The schema defines the bucket schema. Currently, valid numbers
	// are -53 and numbers in the range -4 <= n <= 8. More valid numbers might be
	// added in the future for new bucketing layouts.
//
// The schema equal to -53 means custom buckets. See
// custom_values field description for more details.
//
	// Values between -4 and 8 represent a base-2 bucket schema, where 1
	// is a bucket boundary in each case, and then each power of two is
	// divided into 2^n (n is the schema value) logarithmic buckets. In other
	// words, each bucket boundary is the previous boundary times 2^(2^-n).
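	//
	// For example, schema n=3 yields a growth factor of 2^(1/8) ~= 1.09, so
	// boundaries run ..., 1, 2^(1/8), 2^(2/8), ..., 2, ... (8 buckets per power of two).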
Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
// Types that are valid to be assigned to ZeroCount:
//
// *Histogram_ZeroCountInt
// *Histogram_ZeroCountFloat
ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
// Negative Buckets.
NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
// Use either "negative_deltas" or "negative_counts", the former for
// regular histograms with integer counts, the latter for
// float histograms.
NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
// Positive Buckets.
//
// In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
	// * The span offset+length points to an index of the custom_values array,
	//   or to +Inf if it points to the length of the array.
// * The counts and deltas have the same meaning as for exponential histograms.
PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
// Use either "positive_deltas" or "positive_counts", the former for
// regular histograms with integer counts, the latter for
// float histograms.
PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"`
	// timestamp represents the timestamp of the sample in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
	// for conversion between time.Time and the Prometheus timestamp.
Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// custom_values is an additional field used by non-exponential bucketing layouts.
//
// For custom buckets (-53 schema value) custom_values specify monotonically
// increasing upper inclusive boundaries for the bucket counts with arbitrary
// widths for this histogram. In other words, custom_values represents custom,
// explicit bucketing that could have been converted from the classic histograms.
//
// Those bounds are then referenced by spans in positive_spans with corresponding positive
	// counts or deltas (refer to positive_spans for more details). This way we can
	// encode sparse histograms with custom bucketing (many buckets are often
	// not used).
//
// Note that for custom bounds, even negative observations are placed in the positive
// counts to simplify the implementation and avoid ambiguity of where to place
// an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
// the zero bucket are unused, if the schema indicates custom bucketing.
//
	// For each upper boundary, the previous boundary represents the lower exclusive
	// boundary for that bucket. The first element is the upper inclusive boundary
	// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
	// This is similar to "le" label semantics on classic histograms. You may add a
	// bucket with an upper bound of 0 to make sure that you really have no negative
	// observations, but in practice, native histogram rendering treats the cases with
	// and without a first upper boundary of 0 (and no negative counts) the same.
//
// The last element is not only the upper inclusive bound of the last regular
// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
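	//
	// For example (illustrative values), custom_values [0.5, 1, 2] describes the
	// buckets (-Inf, 0.5], (0.5, 1], (1, 2], and a span reaching the end of the
	// array (length 3) references the implicit (2, +Inf] bucket.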
CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
// start_timestamp represents an optional start timestamp for the histogram sample,
// in ms format. The start timestamp represents the time when the histogram
// started counting, which can increase the accuracy of certain processing and
// query semantics (e.g. rates).
//
// Note:
	// * Some receivers might require start timestamps for certain metric
	//   types and may reject such samples within the Request as a result.
	// * The start timestamp is the same as the "created timestamp" name Prometheus used in the past.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
	// for conversion between time.Time and the Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to efficiency and consistency.
	// A zero value means the value is not set. If you need to use exactly the zero value for
// the timestamp, use 1 millisecond before or after.
StartTimestamp int64 `protobuf:"varint,17,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{5}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return m.Size()
}
func (m *Histogram) XXX_DiscardUnknown() {
xxx_messageInfo_Histogram.DiscardUnknown(m)
}
var xxx_messageInfo_Histogram proto.InternalMessageInfo
type isHistogram_Count interface {
isHistogram_Count()
MarshalTo([]byte) (int, error)
Size() int
}
type isHistogram_ZeroCount interface {
isHistogram_ZeroCount()
MarshalTo([]byte) (int, error)
Size() int
}
type Histogram_CountInt struct {
CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"`
}
type Histogram_CountFloat struct {
CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"`
}
type Histogram_ZeroCountInt struct {
ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"`
}
type Histogram_ZeroCountFloat struct {
ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"`
}
func (*Histogram_CountInt) isHistogram_Count() {}
func (*Histogram_CountFloat) isHistogram_Count() {}
func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {}
func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
func (m *Histogram) GetCount() isHistogram_Count {
if m != nil {
return m.Count
}
return nil
}
func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
if m != nil {
return m.ZeroCount
}
return nil
}
func (m *Histogram) GetCountInt() uint64 {
if x, ok := m.GetCount().(*Histogram_CountInt); ok {
return x.CountInt
}
return 0
}
func (m *Histogram) GetCountFloat() float64 {
if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
return x.CountFloat
}
return 0
}
func (m *Histogram) GetSum() float64 {
if m != nil {
return m.Sum
}
return 0
}
func (m *Histogram) GetSchema() int32 {
if m != nil {
return m.Schema
}
return 0
}
func (m *Histogram) GetZeroThreshold() float64 {
if m != nil {
return m.ZeroThreshold
}
return 0
}
func (m *Histogram) GetZeroCountInt() uint64 {
if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
return x.ZeroCountInt
}
return 0
}
func (m *Histogram) GetZeroCountFloat() float64 {
if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
return x.ZeroCountFloat
}
return 0
}
func (m *Histogram) GetNegativeSpans() []BucketSpan {
if m != nil {
return m.NegativeSpans
}
return nil
}
func (m *Histogram) GetNegativeDeltas() []int64 {
if m != nil {
return m.NegativeDeltas
}
return nil
}
func (m *Histogram) GetNegativeCounts() []float64 {
if m != nil {
return m.NegativeCounts
}
return nil
}
func (m *Histogram) GetPositiveSpans() []BucketSpan {
if m != nil {
return m.PositiveSpans
}
return nil
}
func (m *Histogram) GetPositiveDeltas() []int64 {
if m != nil {
return m.PositiveDeltas
}
return nil
}
func (m *Histogram) GetPositiveCounts() []float64 {
if m != nil {
return m.PositiveCounts
}
return nil
}
func (m *Histogram) GetResetHint() Histogram_ResetHint {
if m != nil {
return m.ResetHint
}
return Histogram_RESET_HINT_UNSPECIFIED
}
func (m *Histogram) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *Histogram) GetCustomValues() []float64 {
if m != nil {
return m.CustomValues
}
return nil
}
func (m *Histogram) GetStartTimestamp() int64 {
if m != nil {
return m.StartTimestamp
}
return 0
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Histogram) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Histogram_CountInt)(nil),
(*Histogram_CountFloat)(nil),
(*Histogram_ZeroCountInt)(nil),
(*Histogram_ZeroCountFloat)(nil),
}
}
// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
type BucketSpan struct {
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BucketSpan) Reset() { *m = BucketSpan{} }
func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
func (*BucketSpan) ProtoMessage() {}
func (*BucketSpan) Descriptor() ([]byte, []int) {
return fileDescriptor_f139519efd9fa8d7, []int{6}
}
func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BucketSpan) XXX_Merge(src proto.Message) {
xxx_messageInfo_BucketSpan.Merge(m, src)
}
func (m *BucketSpan) XXX_Size() int {
return m.Size()
}
func (m *BucketSpan) XXX_DiscardUnknown() {
xxx_messageInfo_BucketSpan.DiscardUnknown(m)
}
var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
func (m *BucketSpan) GetOffset() int32 {
if m != nil {
return m.Offset
}
return 0
}
func (m *BucketSpan) GetLength() uint32 {
if m != nil {
return m.Length
}
return 0
}
func init() {
proto.RegisterEnum("io.prometheus.write.v2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value)
proto.RegisterEnum("io.prometheus.write.v2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
proto.RegisterType((*Request)(nil), "io.prometheus.write.v2.Request")
proto.RegisterType((*TimeSeries)(nil), "io.prometheus.write.v2.TimeSeries")
proto.RegisterType((*Exemplar)(nil), "io.prometheus.write.v2.Exemplar")
proto.RegisterType((*Sample)(nil), "io.prometheus.write.v2.Sample")
proto.RegisterType((*Metadata)(nil), "io.prometheus.write.v2.Metadata")
proto.RegisterType((*Histogram)(nil), "io.prometheus.write.v2.Histogram")
proto.RegisterType((*BucketSpan)(nil), "io.prometheus.write.v2.BucketSpan")
}
func init() {
proto.RegisterFile("io/prometheus/write/v2/types.proto", fileDescriptor_f139519efd9fa8d7)
}
var fileDescriptor_f139519efd9fa8d7 = []byte{
// 931 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xe3, 0x44,
0x14, 0xed, 0xc4, 0xf9, 0xbc, 0x69, 0xb2, 0xce, 0xd0, 0x76, 0xbd, 0x05, 0xb2, 0xd9, 0x20, 0x20,
0x02, 0x29, 0x91, 0xc2, 0x2b, 0x02, 0x35, 0xad, 0xdb, 0xa4, 0x52, 0x92, 0xd5, 0xc4, 0x45, 0x2a,
0x2f, 0x96, 0x9b, 0x4e, 0x12, 0x0b, 0x3b, 0x36, 0x9e, 0x49, 0xa0, 0xfc, 0x40, 0xb4, 0x8f, 0xfc,
0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0xd0, 0x76, 0xb5, 0x6f, 0x33, 0xe7, 0x9e,
0x73, 0xef, 0xc9, 0xf5, 0xbd, 0x13, 0x68, 0xdb, 0x5e, 0xcf, 0x0f, 0x3c, 0x97, 0xf2, 0x15, 0xdd,
0xb0, 0xde, 0x2f, 0x81, 0xcd, 0x69, 0x6f, 0xdb, 0xef, 0xf1, 0x3b, 0x9f, 0xb2, 0xae, 0x1f, 0x78,
0xdc, 0xc3, 0x47, 0xb6, 0xd7, 0x4d, 0x39, 0x5d, 0xc9, 0xe9, 0x6e, 0xfb, 0xc7, 0x07, 0x4b, 0x6f,
0xe9, 0x49, 0x4a, 0x4f, 0x9c, 0x42, 0x76, 0x9b, 0x41, 0x89, 0xd0, 0x9f, 0x37, 0x94, 0x71, 0xac,
0x41, 0x89, 0xdd, 0xb9, 0x37, 0x9e, 0xc3, 0xb4, 0x7c, 0x4b, 0xe9, 0x54, 0x48, 0x7c, 0xc5, 0x43,
0x00, 0x6e, 0xbb, 0x94, 0xd1, 0xc0, 0xa6, 0x4c, 0x2b, 0xb4, 0x94, 0x4e, 0xb5, 0xdf, 0xee, 0x3e,
0x5e, 0xa7, 0x6b, 0xd8, 0x2e, 0x9d, 0x49, 0xe6, 0x20, 0xff, 0xee, 0xaf, 0xd7, 0x7b, 0x24, 0xa3,
0xbd, 0xcc, 0x97, 0x91, 0x9a, 0x6f, 0xff, 0x9e, 0x03, 0x48, 0x69, 0xf8, 0x35, 0x54, 0x1d, 0xeb,
0x86, 0x3a, 0xcc, 0x0c, 0xe8, 0x82, 0x69, 0xa8, 0xa5, 0x74, 0x6a, 0x04, 0x42, 0x88, 0xd0, 0x05,
0xc3, 0xdf, 0x41, 0x89, 0x59, 0xae, 0xef, 0x50, 0xa6, 0xe5, 0x64, 0xf1, 0xe6, 0x53, 0xc5, 0x67,
0x92, 0x16, 0x15, 0x8e, 0x45, 0xf8, 0x02, 0x60, 0x65, 0x33, 0xee, 0x2d, 0x03, 0xcb, 0x65, 0x9a,
0x22, 0x53, 0xbc, 0x79, 0x2a, 0xc5, 0x30, 0x66, 0xc6, 0xf6, 0x53, 0x29, 0x3e, 0x83, 0x0a, 0xfd,
0x95, 0xba, 0xbe, 0x63, 0x05, 0x61, 0x93, 0xaa, 0xfd, 0xd6, 0x53, 0x79, 0xf4, 0x88, 0x18, 0xa5,
0x49, 0x85, 0x78, 0x00, 0x65, 0x97, 0x72, 0xeb, 0xd6, 0xe2, 0x96, 0x56, 0x68, 0xa1, 0xe7, 0x92,
0x8c, 0x23, 0x5e, 0x94, 0x24, 0xd1, 0x5d, 0xe6, 0xcb, 0x45, 0xb5, 0xd4, 0x36, 0xa1, 0x1c, 0x97,
0x79, 0x7f, 0x17, 0x0f, 0xa0, 0xb0, 0xb5, 0x9c, 0x0d, 0xd5, 0x72, 0x2d, 0xd4, 0x41, 0x24, 0xbc,
0xe0, 0x4f, 0xa0, 0x22, 0xbf, 0x0f, 0xb7, 0x5c, 0x5f, 0x53, 0x5a, 0xa8, 0xa3, 0x90, 0x14, 0x68,
0x53, 0x28, 0x86, 0x2d, 0x4d, 0xd5, 0xe8, 0x49, 0x75, 0x6e, 0x47, 0x8d, 0xbf, 0x84, 0x17, 0x8c,
0x5b, 0x01, 0x37, 0x77, 0x2b, 0xd4, 0x25, 0x6c, 0x24, 0x65, 0xfe, 0xc9, 0x41, 0x39, 0xfe, 0xa9,
0xf8, 0x7b, 0xc8, 0x8b, 0x79, 0x96, 0x85, 0xea, 0xfd, 0xaf, 0xdf, 0xd7, 0x1a, 0x71, 0x08, 0xec,
0xb9, 0x71, 0xe7, 0x53, 0x22, 0x85, 0xf8, 0x15, 0x94, 0x57, 0xd4, 0xf1, 0x45, 0x1f, 0x64, 0xbd,
0x1a, 0x29, 0x89, 0x3b, 0xa1, 0x0b, 0x11, 0xda, 0xac, 0x6d, 0x2e, 0x43, 0xf9, 0x30, 0x24, 0xee,
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/symbols_test.go | prompb/io/prometheus/write/v2/symbols_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
)
func TestSymbolsTable(t *testing.T) {
s := NewSymbolTable()
require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
require.Equal(t, uint32(0), s.Symbolize(""))
require.Equal(t, []string{""}, s.Symbols())
require.Equal(t, uint32(1), s.Symbolize("abc"))
require.Equal(t, []string{"", "abc"}, s.Symbols())
require.Equal(t, uint32(2), s.Symbolize("__name__"))
require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())
require.Equal(t, uint32(3), s.Symbolize("foo"))
require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())
s.Reset()
require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
require.Equal(t, uint32(0), s.Symbolize(""))
require.Equal(t, uint32(1), s.Symbolize("__name__"))
require.Equal(t, []string{"", "__name__"}, s.Symbols())
require.Equal(t, uint32(2), s.Symbolize("abc"))
require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())
ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
encoded := s.SymbolizeLabels(ls, nil)
require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
b := labels.NewScratchBuilder(len(encoded))
decoded, err := desymbolizeLabels(&b, encoded, s.Symbols())
require.NoError(t, err)
require.Equal(t, ls, decoded)
// Different buf.
ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234")
encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5})
require.Equal(t, []uint32{1, 3, 6, 5}, encoded)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/symbols.go | prompb/io/prometheus/write/v2/symbols.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"fmt"
"github.com/prometheus/prometheus/model/labels"
)
// SymbolsTable implements a table for easy symbol use.
type SymbolsTable struct {
strings []string
symbolsMap map[string]uint32
}
// NewSymbolTable returns a symbol table.
func NewSymbolTable() SymbolsTable {
return SymbolsTable{
// Empty string is required as a first element.
symbolsMap: map[string]uint32{"": 0},
strings: []string{""},
}
}
// Symbolize adds (if not added before) a string to the symbols table,
// while returning its reference number.
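// On a fresh table, the first new string gets reference 1, because
// reference 0 is reserved for the required empty string.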
func (t *SymbolsTable) Symbolize(str string) uint32 {
if ref, ok := t.symbolsMap[str]; ok {
return ref
}
ref := uint32(len(t.strings))
t.strings = append(t.strings, str)
t.symbolsMap[str] = ref
return ref
}
// SymbolizeLabels symbolizes Prometheus labels.
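// For example, on a fresh table, labels {__name__="up"} encode to
// refs [1, 2], interleaving name and value references.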
func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
result := buf[:0]
lbls.Range(func(l labels.Label) {
off := t.Symbolize(l.Name)
result = append(result, off)
off = t.Symbolize(l.Value)
result = append(result, off)
})
return result
}
// Symbols returns the computed symbols table to put in e.g. Request.Symbols.
// As per spec, order does not matter.
func (t *SymbolsTable) Symbols() []string {
return t.strings
}
// Reset clears symbols table.
func (t *SymbolsTable) Reset() {
// NOTE: Make sure to keep empty symbol.
t.strings = t.strings[:1]
for k := range t.symbolsMap {
if k == "" {
continue
}
delete(t.symbolsMap, k)
}
}
// desymbolizeLabels decodes label references into labels, using the given symbols table.
// This function requires labelRefs to have an even number of elements (name-value pairs) and
// all references must be valid indices within the symbols table. It will return an error if
// these invariants are violated.
func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) (labels.Labels, error) {
if len(labelRefs)%2 != 0 {
return labels.EmptyLabels(), fmt.Errorf("invalid labelRefs length %d", len(labelRefs))
}
b.Reset()
for i := 0; i < len(labelRefs); i += 2 {
nameRef, valueRef := labelRefs[i], labelRefs[i+1]
if int(nameRef) >= len(symbols) || int(valueRef) >= len(symbols) {
return labels.EmptyLabels(), fmt.Errorf("labelRefs %d (name) = %d (value) outside of symbols table (size %d)", nameRef, valueRef, len(symbols))
}
b.Add(symbols[nameRef], symbols[valueRef])
}
b.Sort()
return b.Labels(), nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/custom_test.go | prompb/io/prometheus/write/v2/custom_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestOptimizedMarshal(t *testing.T) {
for _, tt := range []struct {
name string
m *Request
}{
{
name: "empty",
m: &Request{},
},
{
name: "simple",
m: &Request{
Timeseries: []TimeSeries{
{
LabelsRefs: []uint32{
0, 1,
2, 3,
4, 5,
6, 7,
8, 9,
10, 11,
12, 13,
14, 15,
},
Samples: []Sample{{Value: 1, Timestamp: 0}},
Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}},
Histograms: nil,
},
{
LabelsRefs: []uint32{
0, 1,
2, 3,
4, 5,
6, 7,
8, 9,
10, 11,
12, 13,
14, 15,
},
Samples: []Sample{{Value: 2, Timestamp: 1}},
Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}},
Histograms: nil,
},
},
Symbols: []string{
"a", "b",
"c", "d",
"e", "f",
"g", "h",
"i", "j",
"k", "l",
"m", "n",
"o", "p",
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
// Keep the slice allocated to mimic what std Marshal
// would give to sized Marshal.
got := make([]byte, 0)
// Should be the same as the standard marshal.
expected, err := tt.m.Marshal()
require.NoError(t, err)
got, err = tt.m.OptimizedMarshal(got)
require.NoError(t, err)
require.Equal(t, expected, got)
// Unmarshal should work too.
m := &Request{}
require.NoError(t, m.Unmarshal(got))
require.Equal(t, tt.m, m)
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/write/v2/codec.go | prompb/io/prometheus/write/v2/codec.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
)
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
// ToLabels returns model labels.Labels from the timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) (labels.Labels, error) {
return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
}
// ToMetadata returns model metadata from the timeseries' remote metadata.
func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata {
typ := model.MetricTypeUnknown
switch m.Metadata.Type {
case Metadata_METRIC_TYPE_COUNTER:
typ = model.MetricTypeCounter
case Metadata_METRIC_TYPE_GAUGE:
typ = model.MetricTypeGauge
case Metadata_METRIC_TYPE_HISTOGRAM:
typ = model.MetricTypeHistogram
case Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
typ = model.MetricTypeGaugeHistogram
case Metadata_METRIC_TYPE_SUMMARY:
typ = model.MetricTypeSummary
case Metadata_METRIC_TYPE_INFO:
typ = model.MetricTypeInfo
case Metadata_METRIC_TYPE_STATESET:
typ = model.MetricTypeStateset
}
return metadata.Metadata{
Type: typ,
Unit: symbols[m.Metadata.UnitRef],
Help: symbols[m.Metadata.HelpRef],
}
}
// FromMetadataType transforms a Prometheus metricType into writev2 metricType.
// Since the former is a string we need to transform it to an enum.
func FromMetadataType(t model.MetricType) Metadata_MetricType {
switch t {
case model.MetricTypeCounter:
return Metadata_METRIC_TYPE_COUNTER
case model.MetricTypeGauge:
return Metadata_METRIC_TYPE_GAUGE
case model.MetricTypeHistogram:
return Metadata_METRIC_TYPE_HISTOGRAM
case model.MetricTypeGaugeHistogram:
return Metadata_METRIC_TYPE_GAUGEHISTOGRAM
case model.MetricTypeSummary:
return Metadata_METRIC_TYPE_SUMMARY
case model.MetricTypeInfo:
return Metadata_METRIC_TYPE_INFO
case model.MetricTypeStateset:
return Metadata_METRIC_TYPE_STATESET
default:
return Metadata_METRIC_TYPE_UNSPECIFIED
}
}
// IsFloatHistogram returns true if the histogram is a float histogram.
func (h Histogram) IsFloatHistogram() bool {
_, ok := h.GetCount().(*Histogram_CountFloat)
return ok
}
// ToIntHistogram returns an integer Prometheus histogram from the remote implementation
// of the integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
if h.IsFloatHistogram() {
return nil
}
return &histogram.Histogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountInt(),
Count: h.GetCountInt(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveDeltas(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeDeltas(),
CustomValues: h.GetCustomValues(),
}
}
// ToFloatHistogram returns a float Prometheus histogram from the remote implementation
// of the float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
if h.IsFloatHistogram() {
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountFloat(),
Count: h.GetCountFloat(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeCounts(),
CustomValues: h.GetCustomValues(),
}
}
// Conversion from integer histogram.
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: float64(h.GetZeroCountInt()),
Count: float64(h.GetCountInt()),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
CustomValues: h.GetCustomValues(),
}
}
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func deltasToCounts(deltas []int64) []float64 {
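	// Running sum: e.g. deltas {2, 1, -1} yield absolute counts {2, 3, 2}.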
counts := make([]float64, len(deltas))
var cur float64
for i, d := range deltas {
cur += float64(d)
counts[i] = cur
}
return counts
}
// FromIntHistogram returns a remote Histogram built from the given integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
return Histogram{
Count: &Histogram_CountInt{CountInt: h.Count},
Sum: h.Sum,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
NegativeSpans: spansToSpansProto(h.NegativeSpans),
NegativeDeltas: h.NegativeBuckets,
PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets,
ResetHint: Histogram_ResetHint(h.CounterResetHint),
CustomValues: h.CustomValues,
Timestamp: timestamp,
}
}
// FromFloatHistogram returns a remote Histogram built from the given float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
return Histogram{
Count: &Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
CustomValues: fh.CustomValues,
Timestamp: timestamp,
}
}
func spansToSpansProto(s []histogram.Span) []BucketSpan {
if len(s) == 0 {
return nil
}
spans := make([]BucketSpan, len(s))
for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) (exemplar.Exemplar, error) {
timestamp := m.Timestamp
lbls, err := desymbolizeLabels(b, m.LabelsRefs, symbols)
if err != nil {
return exemplar.Exemplar{}, err
}
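	// A zero timestamp is treated as unset (HasTs=false), matching the
	// zero-value convention for optional fields in this package.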
return exemplar.Exemplar{
Labels: lbls,
Value: m.Value,
Ts: timestamp,
HasTs: timestamp != 0,
}, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/client/metrics.pb.go | prompb/io/prometheus/client/metrics.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type MetricType int32
const (
// COUNTER must use the Metric field "counter".
MetricType_COUNTER MetricType = 0
// GAUGE must use the Metric field "gauge".
MetricType_GAUGE MetricType = 1
// SUMMARY must use the Metric field "summary".
MetricType_SUMMARY MetricType = 2
// UNTYPED must use the Metric field "untyped".
MetricType_UNTYPED MetricType = 3
// HISTOGRAM must use the Metric field "histogram".
MetricType_HISTOGRAM MetricType = 4
// GAUGE_HISTOGRAM must use the Metric field "histogram".
MetricType_GAUGE_HISTOGRAM MetricType = 5
)
var MetricType_name = map[int32]string{
0: "COUNTER",
1: "GAUGE",
2: "SUMMARY",
3: "UNTYPED",
4: "HISTOGRAM",
5: "GAUGE_HISTOGRAM",
}
var MetricType_value = map[string]int32{
"COUNTER": 0,
"GAUGE": 1,
"SUMMARY": 2,
"UNTYPED": 3,
"HISTOGRAM": 4,
"GAUGE_HISTOGRAM": 5,
}
func (x MetricType) String() string {
return proto.EnumName(MetricType_name, int32(x))
}
func (MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{0}
}
type LabelPair struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{0}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(m, src)
}
func (m *LabelPair) XXX_Size() int {
return m.Size()
}
func (m *LabelPair) XXX_DiscardUnknown() {
xxx_messageInfo_LabelPair.DiscardUnknown(m)
}
var xxx_messageInfo_LabelPair proto.InternalMessageInfo
func (m *LabelPair) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *LabelPair) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type Gauge struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{1}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(m, src)
}
func (m *Gauge) XXX_Size() int {
return m.Size()
}
func (m *Gauge) XXX_DiscardUnknown() {
xxx_messageInfo_Gauge.DiscardUnknown(m)
}
var xxx_messageInfo_Gauge proto.InternalMessageInfo
func (m *Gauge) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
type Counter struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{2}
}
func (m *Counter) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(m, src)
}
func (m *Counter) XXX_Size() int {
return m.Size()
}
func (m *Counter) XXX_DiscardUnknown() {
xxx_messageInfo_Counter.DiscardUnknown(m)
}
var xxx_messageInfo_Counter proto.InternalMessageInfo
func (m *Counter) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Counter) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
func (m *Counter) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
type Quantile struct {
Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{3}
}
func (m *Quantile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(m, src)
}
func (m *Quantile) XXX_Size() int {
return m.Size()
}
func (m *Quantile) XXX_DiscardUnknown() {
xxx_messageInfo_Quantile.DiscardUnknown(m)
}
var xxx_messageInfo_Quantile proto.InternalMessageInfo
func (m *Quantile) GetQuantile() float64 {
if m != nil {
return m.Quantile
}
return 0
}
func (m *Quantile) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
type Summary struct {
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{4}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(m, src)
}
func (m *Summary) XXX_Size() int {
return m.Size()
}
func (m *Summary) XXX_DiscardUnknown() {
xxx_messageInfo_Summary.DiscardUnknown(m)
}
var xxx_messageInfo_Summary proto.InternalMessageInfo
func (m *Summary) GetSampleCount() uint64 {
if m != nil {
return m.SampleCount
}
return 0
}
func (m *Summary) GetSampleSum() float64 {
if m != nil {
return m.SampleSum
}
return 0
}
func (m *Summary) GetQuantile() []Quantile {
if m != nil {
return m.Quantile
}
return nil
}
func (m *Summary) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
type Untyped struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{5}
}
func (m *Untyped) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(m, src)
}
func (m *Untyped) XXX_Size() int {
return m.Size()
}
func (m *Untyped) XXX_DiscardUnknown() {
xxx_messageInfo_Untyped.DiscardUnknown(m)
}
var xxx_messageInfo_Untyped proto.InternalMessageInfo
func (m *Untyped) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
type Histogram struct {
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
// Buckets for the classic histogram.
Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
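// For example, with n=3 each power of two is divided into 2^3 = 8 buckets, so
// consecutive bucket boundaries differ by a factor of 2^(1/8) ≈ 1.0905.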
Schema int32 `protobuf:"zigzag32,5,opt,name=schema,proto3" json:"schema,omitempty"`
ZeroThreshold float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"`
// Negative buckets for the native histogram.
NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"`
// Use either "negative_delta" or "negative_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
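// For example (illustrative values), absolute integer bucket counts [3, 5, 4] are
// encoded as deltas [3, 2, -1], each relative to the previous count (or to zero
// for the first bucket).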
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"`
NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"`
// Positive buckets for the native histogram.
// Use a no-op span (offset 0, length 0) for a native histogram without any
// observations yet and with a zero_threshold of 0. Otherwise, it would be
// indistinguishable from a classic histogram.
PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"`
PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"`
// Only used for native histograms. These exemplars MUST have a timestamp.
Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return m.Size()
}
func (m *Histogram) XXX_DiscardUnknown() {
xxx_messageInfo_Histogram.DiscardUnknown(m)
}
var xxx_messageInfo_Histogram proto.InternalMessageInfo
func (m *Histogram) GetSampleCount() uint64 {
if m != nil {
return m.SampleCount
}
return 0
}
func (m *Histogram) GetSampleCountFloat() float64 {
if m != nil {
return m.SampleCountFloat
}
return 0
}
func (m *Histogram) GetSampleSum() float64 {
if m != nil {
return m.SampleSum
}
return 0
}
func (m *Histogram) GetBucket() []Bucket {
if m != nil {
return m.Bucket
}
return nil
}
func (m *Histogram) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
func (m *Histogram) GetSchema() int32 {
if m != nil {
return m.Schema
}
return 0
}
func (m *Histogram) GetZeroThreshold() float64 {
if m != nil {
return m.ZeroThreshold
}
return 0
}
func (m *Histogram) GetZeroCount() uint64 {
if m != nil {
return m.ZeroCount
}
return 0
}
func (m *Histogram) GetZeroCountFloat() float64 {
if m != nil {
return m.ZeroCountFloat
}
return 0
}
func (m *Histogram) GetNegativeSpan() []BucketSpan {
if m != nil {
return m.NegativeSpan
}
return nil
}
func (m *Histogram) GetNegativeDelta() []int64 {
if m != nil {
return m.NegativeDelta
}
return nil
}
func (m *Histogram) GetNegativeCount() []float64 {
if m != nil {
return m.NegativeCount
}
return nil
}
func (m *Histogram) GetPositiveSpan() []BucketSpan {
if m != nil {
return m.PositiveSpan
}
return nil
}
func (m *Histogram) GetPositiveDelta() []int64 {
if m != nil {
return m.PositiveDelta
}
return nil
}
func (m *Histogram) GetPositiveCount() []float64 {
if m != nil {
return m.PositiveCount
}
return nil
}
func (m *Histogram) GetExemplars() []*Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
type Bucket struct {
CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"`
CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"`
UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{7}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
}
func (m *Bucket) XXX_Size() int {
return m.Size()
}
func (m *Bucket) XXX_DiscardUnknown() {
xxx_messageInfo_Bucket.DiscardUnknown(m)
}
var xxx_messageInfo_Bucket proto.InternalMessageInfo
func (m *Bucket) GetCumulativeCount() uint64 {
if m != nil {
return m.CumulativeCount
}
return 0
}
func (m *Bucket) GetCumulativeCountFloat() float64 {
if m != nil {
return m.CumulativeCountFloat
}
return 0
}
func (m *Bucket) GetUpperBound() float64 {
if m != nil {
return m.UpperBound
}
return 0
}
func (m *Bucket) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
// A BucketSpan defines a number of consecutive buckets in a native
// histogram with their offset. Logically, it would be more
// straightforward to include the bucket counts in the Span. However,
// the protobuf representation is more compact in the way the data is
// structured here (with all the buckets in a single array separate
// from the Spans).
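// An illustrative example (values assumed): spans [{offset:1,length:2}, {offset:2,length:1}]
// place three buckets at indices 1, 2 and 5, with their counts stored in one flat
// array of length 3; the second span's offset is the gap from the end of the first.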
type BucketSpan struct {
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BucketSpan) Reset() { *m = BucketSpan{} }
func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
func (*BucketSpan) ProtoMessage() {}
func (*BucketSpan) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{8}
}
func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BucketSpan) XXX_Merge(src proto.Message) {
xxx_messageInfo_BucketSpan.Merge(m, src)
}
func (m *BucketSpan) XXX_Size() int {
return m.Size()
}
func (m *BucketSpan) XXX_DiscardUnknown() {
xxx_messageInfo_BucketSpan.DiscardUnknown(m)
}
var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
func (m *BucketSpan) GetOffset() int32 {
if m != nil {
return m.Offset
}
return 0
}
func (m *BucketSpan) GetLength() uint32 {
if m != nil {
return m.Length
}
return 0
}
type Exemplar struct {
Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{9}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return m.Size()
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabel() []LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() *types.Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
type Metric struct {
Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"`
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"`
Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"`
Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"`
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"`
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"`
TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{10}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return m.Size()
}
func (m *Metric) XXX_DiscardUnknown() {
xxx_messageInfo_Metric.DiscardUnknown(m)
}
var xxx_messageInfo_Metric proto.InternalMessageInfo
func (m *Metric) GetLabel() []LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Metric) GetGauge() *Gauge {
if m != nil {
return m.Gauge
}
return nil
}
func (m *Metric) GetCounter() *Counter {
if m != nil {
return m.Counter
}
return nil
}
func (m *Metric) GetSummary() *Summary {
if m != nil {
return m.Summary
}
return nil
}
func (m *Metric) GetUntyped() *Untyped {
if m != nil {
return m.Untyped
}
return nil
}
func (m *Metric) GetHistogram() *Histogram {
if m != nil {
return m.Histogram
}
return nil
}
func (m *Metric) GetTimestampMs() int64 {
if m != nil {
return m.TimestampMs
}
return 0
}
type MetricFamily struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"`
Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"`
Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
func (*MetricFamily) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{11}
}
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(m, src)
}
func (m *MetricFamily) XXX_Size() int {
return m.Size()
}
func (m *MetricFamily) XXX_DiscardUnknown() {
xxx_messageInfo_MetricFamily.DiscardUnknown(m)
}
var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
func (m *MetricFamily) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *MetricFamily) GetHelp() string {
if m != nil {
return m.Help
}
return ""
}
func (m *MetricFamily) GetType() MetricType {
if m != nil {
return m.Type
}
return MetricType_COUNTER
}
func (m *MetricFamily) GetMetric() []Metric {
if m != nil {
return m.Metric
}
return nil
}
func (m *MetricFamily) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
func init() {
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
}
func init() {
proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
}
var fileDescriptor_d1e5ddb18987a258 = []byte{
// 982 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x8f, 0xdb, 0x44,
0x18, 0xae, 0x9b, 0x4f, 0xbf, 0xd9, 0x6c, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90,
0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0xb2, 0x48, 0xec, 0xb6, 0xdb, 0x14, 0x95, 0xb4, 0x65, 0x92,
0x1c, 0xca, 0xc5, 0x9a, 0x24, 0xb3, 0x8e, 0x85, 0xbf, 0xb0, 0xc7, 0x15, 0xcb, 0x9d, 0xdf, 0xc0,
0x1f, 0xe0, 0x67, 0x70, 0x46, 0x3d, 0x72, 0xe2, 0x88, 0xd0, 0xfe, 0x0e, 0x0e, 0x68, 0xbe, 0xec,
0x6c, 0xe5, 0x2c, 0x2c, 0xdc, 0x3c, 0x8f, 0x9f, 0x67, 0xe6, 0x79, 0x1f, 0xdb, 0xef, 0x6b, 0x70,
0xfc, 0x78, 0x94, 0xa4, 0x71, 0x48, 0xd9, 0x9a, 0xe6, 0xd9, 0x68, 0x19, 0xf8, 0x34, 0x62, 0xa3,
0x90, 0xb2, 0xd4, 0x5f, 0x66, 0xc3, 0x24, 0x8d, 0x59, 0x8c, 0x7a, 0x7e, 0x3c, 0x2c, 0x39, 0x43,
0xc9, 0xd9, 0xef, 0x79, 0xb1, 0x17, 0x0b, 0xc2, 0x88, 0x5f, 0x49, 0xee, 0x7e, 0xdf, 0x8b, 0x63,
0x2f, 0xa0, 0x23, 0xb1, 0x5a, 0xe4, 0xe7, 0x23, 0xe6, 0x87, 0x34, 0x63, 0x24, 0x4c, 0x24, 0xc1,
0xf9, 0x18, 0xcc, 0xaf, 0xc8, 0x82, 0x06, 0xcf, 0x89, 0x9f, 0x22, 0x04, 0xf5, 0x88, 0x84, 0xd4,
0x36, 0x06, 0xc6, 0x91, 0x89, 0xc5, 0x35, 0xea, 0x41, 0xe3, 0x25, 0x09, 0x72, 0x6a, 0xdf, 0x16,
0xa0, 0x5c, 0x38, 0x07, 0xd0, 0x18, 0x93, 0xdc, 0xdb, 0xb8, 0xcd, 0x35, 0x86, 0xbe, 0xfd, 0xb3,
0x01, 0xad, 0x07, 0x71, 0x1e, 0x31, 0x9a, 0x56, 0x33, 0xd0, 0x7d, 0x68, 0xd3, 0xef, 0x69, 0x98,
0x04, 0x24, 0x15, 0x3b, 0x77, 0x3e, 0x3c, 0x1c, 0x56, 0xd5, 0x35, 0x3c, 0x53, 0x2c, 0x5c, 0xf0,
0xd1, 0x18, 0xf6, 0x96, 0x29, 0x25, 0x8c, 0xae, 0xdc, 0xa2, 0x1c, 0xbb, 0x26, 0x36, 0xd9, 0x1f,
0xca, 0x82, 0x87, 0xba, 0xe0, 0xe1, 0x4c, 0x33, 0xb0, 0xa5, 0x44, 0x05, 0xe2, 0x1c, 0x43, 0xfb,
0xeb, 0x9c, 0x44, 0xcc, 0x0f, 0x28, 0xda, 0x87, 0xf6, 0x77, 0xea, 0x5a, 0x39, 0x2d, 0xd6, 0x57,
0x33, 0x28, 0x8a, 0xfc, 0xdd, 0x80, 0xd6, 0x34, 0x0f, 0x43, 0x92, 0x5e, 0xa0, 0xb7, 0x61, 0x27,
0x23, 0x61, 0x12, 0x50, 0x77, 0xc9, 0xcb, 0x16, 0x3b, 0xd4, 0x71, 0x47, 0x62, 0x22, 0x09, 0x74,
0x00, 0xa0, 0x28, 0x59, 0x1e, 0xaa, 0x9d, 0x4c, 0x89, 0x4c, 0xf3, 0x10, 0x7d, 0xb1, 0x71, 0x7e,
0x6d, 0x50, 0xdb, 0x1e, 0x88, 0x76, 0x7c, 0x5a, 0x7f, 0xf5, 0x47, 0xff, 0xd6, 0x86, 0xcb, 0xca,
0x58, 0xea, 0xff, 0x21, 0x96, 0x3e, 0xb4, 0xe6, 0x11, 0xbb, 0x48, 0xe8, 0x6a, 0xcb, 0xe3, 0xfd,
0xab, 0x01, 0xe6, 0x63, 0x3f, 0x63, 0xb1, 0x97, 0x92, 0xf0, 0xdf, 0xd4, 0xfe, 0x3e, 0xa0, 0x4d,
0x8a, 0x7b, 0x1e, 0xc4, 0x84, 0x09, 0x6f, 0x06, 0xb6, 0x36, 0x88, 0x8f, 0x38, 0xfe, 0x4f, 0x49,
0xdd, 0x87, 0xe6, 0x22, 0x5f, 0x7e, 0x4b, 0x99, 0xca, 0xe9, 0xad, 0xea, 0x9c, 0x4e, 0x05, 0x47,
0xa5, 0xa4, 0x14, 0xd5, 0x19, 0xdd, 0xb9, 0x79, 0x46, 0xe8, 0x2e, 0x34, 0xb3, 0xe5, 0x9a, 0x86,
0xc4, 0x6e, 0x0c, 0x8c, 0xa3, 0x3d, 0xac, 0x56, 0xe8, 0x1d, 0xd8, 0xfd, 0x81, 0xa6, 0xb1, 0xcb,
0xd6, 0x29, 0xcd, 0xd6, 0x71, 0xb0, 0xb2, 0x9b, 0xc2, 0x7f, 0x97, 0xa3, 0x33, 0x0d, 0xf2, 0x12,
0x05, 0x4d, 0x26, 0xd6, 0x12, 0x89, 0x99, 0x1c, 0x91, 0x79, 0x1d, 0x81, 0x55, 0xde, 0x56, 0x69,
0xb5, 0xc5, 0x3e, 0xbb, 0x05, 0x49, 0x66, 0xf5, 0x04, 0xba, 0x11, 0xf5, 0x08, 0xf3, 0x5f, 0x52,
0x37, 0x4b, 0x48, 0x64, 0x9b, 0x22, 0x93, 0xc1, 0x75, 0x99, 0x4c, 0x13, 0x12, 0xa9, 0x5c, 0x76,
0xb4, 0x98, 0x63, 0xdc, 0x7c, 0xb1, 0xd9, 0x8a, 0x06, 0x8c, 0xd8, 0x30, 0xa8, 0x1d, 0x21, 0x5c,
0x1c, 0xf1, 0x90, 0x83, 0x57, 0x68, 0xb2, 0x80, 0xce, 0xa0, 0xc6, 0x6b, 0xd4, 0xa8, 0x2c, 0xe2,
0x09, 0x74, 0x93, 0x38, 0xf3, 0x4b, 0x6b, 0x3b, 0x37, 0xb3, 0xa6, 0xc5, 0xda, 0x5a, 0xb1, 0x99,
0xb4, 0xd6, 0x95, 0xd6, 0x34, 0x5a, 0x58, 0x2b, 0x68, 0xd2, 0xda, 0xae, 0xb4, 0xa6, 0x51, 0x69,
0xed, 0x18, 0x4c, 0xdd, 0x4d, 0x32, 0xdb, 0xba, 0xee, 0x6b, 0x2b, 0xda, 0x4f, 0x29, 0x70, 0x7e,
0x35, 0xa0, 0x29, 0xed, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x86, 0x21, 0xdf, 0xff,
0x3b, 0x25, 0x2e, 0xcf, 0xbc, 0x07, 0x77, 0x5f, 0xa7, 0x5e, 0xf9, 0x0e, 0x7a, 0xaf, 0x09, 0xe4,
0xf3, 0xed, 0x43, 0x27, 0x4f, 0x12, 0x9a, 0xba, 0x8b, 0x38, 0x8f, 0x56, 0xea, 0x63, 0x00, 0x01,
0x9d, 0x72, 0xe4, 0x4a, 0x23, 0xad, 0xdd, 0xac, 0x91, 0x3a, 0xc7, 0x00, 0x65, 0xec, 0xfc, 0x95,
0x8e, 0xcf, 0xcf, 0x33, 0x2a, 0x2b, 0xd8, 0xc3, 0x6a, 0xc5, 0xf1, 0x80, 0x46, 0x1e, 0x5b, 0x8b,
0xd3, 0xbb, 0x58, 0xad, 0x9c, 0x9f, 0x0c, 0x68, 0xeb, 0x4d, 0xd1, 0x67, 0xd0, 0x08, 0xf8, 0x1c,
0xb1, 0x0d, 0x91, 0x66, 0xbf, 0xda, 0x43, 0x31, 0x6a, 0xd4, 0x33, 0x96, 0x9a, 0xea, 0xfe, 0x8a,
0x3e, 0x05, 0xf3, 0x26, 0xed, 0xbd, 0x24, 0x3b, 0x3f, 0xd6, 0xa0, 0x39, 0x11, 0x33, 0xf3, 0xff,
0xf9, 0xfa, 0x00, 0x1a, 0x1e, 0x9f, 0x72, 0x6a, 0x42, 0xbd, 0x59, 0x2d, 0x16, 0x83, 0x10, 0x4b,
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/client/decoder.go | prompb/io/prometheus/client/decoder.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package io_prometheus_client //nolint:revive
import (
"encoding/binary"
"errors"
"fmt"
"io"
"unicode/utf8"
"unsafe"
proto "github.com/gogo/protobuf/proto"
"github.com/prometheus/common/model"
)
type MetricStreamingDecoder struct {
in []byte
inPos int
// TODO(bwplotka): Switch to a generator/plugin that won't have those fields accessible, e.g. OpaqueAPI.
// We leverage the fact that those two don't collide.
*MetricFamily // Without Metric, guarded by overridden GetMetric method.
*Metric // Without Label, guarded by overridden GetLabel method.
mfData []byte
metrics []pos
metricIndex int
mData []byte
labels []pos
}
// NewMetricStreamingDecoder returns a Go iterator that unmarshals the given protobuf
// bytes one metric family and metric at a time, allowing efficient streaming.
//
// Do not modify MetricStreamingDecoder between iterations, as it is reused to save allocations.
// GetGauge, GetCounter, etc. are also cached, which means GetGauge can still return a value
// for a counter if a gauge was parsed previously. It's up to the caller to check Type to
// decide which method to use when reading the value.
//
// TODO(bwplotka): An io.Reader approach is possible too, but textparse has access to the whole scrape for now.
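//
// Example usage (an illustrative sketch; "data" is assumed to hold length-delimited
// MetricFamily messages and error handling is elided — see decoder_test.go for a
// complete loop that distinguishes io.EOF from real errors):
//
//	d := NewMetricStreamingDecoder(data)
//	for d.NextMetricFamily() == nil {
//		for d.NextMetric() == nil {
//			b := labels.NewScratchBuilder(0)
//			b.SetUnsafeAdd(true)
//			_ = d.Label(&b) // Labels are valid only until the next Next* call.
//		}
//	}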
func NewMetricStreamingDecoder(data []byte) *MetricStreamingDecoder {
return &MetricStreamingDecoder{
in: data,
MetricFamily: &MetricFamily{},
Metric: &Metric{},
metrics: make([]pos, 0, 100),
}
}
var errInvalidVarint = errors.New("clientpb: invalid varint encountered")
// NextMetricFamily decodes the next metric family from the input, without decoding
// its metrics. Use NextMetric() to decode those. The MetricFamily fields Name, Help
// and Unit are only valid until NextMetricFamily is called again.
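//
// The input is expected to be a stream of length-delimited messages: each encoded
// MetricFamily is preceded by a uvarint holding its byte length.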
func (m *MetricStreamingDecoder) NextMetricFamily() error {
b := m.in[m.inPos:]
if len(b) == 0 {
return io.EOF
}
messageLength, varIntLength := proto.DecodeVarint(b) // TODO(bwplotka): Get rid of gogo.
if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
return errInvalidVarint
}
totalLength := varIntLength + int(messageLength)
if totalLength > len(b) {
return fmt.Errorf("clientpb: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
}
m.resetMetricFamily()
m.mfData = b[varIntLength:totalLength]
m.inPos += totalLength
return m.unmarshalWithoutMetrics(m, m.mfData)
}
// resetMetricFamily resets all the fields in m to the zero value, while reusing slice memory.
func (m *MetricStreamingDecoder) resetMetricFamily() {
m.metrics = m.metrics[:0]
m.metricIndex = 0
m.MetricFamily.Reset()
}
func (m *MetricStreamingDecoder) NextMetric() error {
if m.metricIndex >= len(m.metrics) {
return io.EOF
}
m.resetMetric()
m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end]
if err := m.unmarshalWithoutLabels(m, m.mData); err != nil {
return err
}
m.metricIndex++
return nil
}
// resetMetric resets all the fields in m to the zero value, while reusing slice memory.
func (m *MetricStreamingDecoder) resetMetric() {
m.labels = m.labels[:0]
m.TimestampMs = 0
// TODO(bwplotka): Autogenerate reset functions.
if m.Counter != nil {
m.Counter.Value = 0
m.Counter.CreatedTimestamp = nil
m.Counter.Exemplar = nil
}
if m.Gauge != nil {
m.Gauge.Value = 0
}
if m.Histogram != nil {
m.Histogram.SampleCount = 0
m.Histogram.SampleCountFloat = 0
m.Histogram.SampleSum = 0
m.Histogram.Bucket = m.Histogram.Bucket[:0]
m.Histogram.CreatedTimestamp = nil
m.Histogram.Schema = 0
m.Histogram.ZeroThreshold = 0
m.Histogram.ZeroCount = 0
m.Histogram.ZeroCountFloat = 0
m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0]
m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0]
m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0]
m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0]
m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0]
m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0]
m.Histogram.Exemplars = m.Histogram.Exemplars[:0]
}
if m.Summary != nil {
m.Summary.SampleCount = 0
m.Summary.SampleSum = 0
m.Summary.Quantile = m.Summary.Quantile[:0]
m.Summary.CreatedTimestamp = nil
}
}
func (*MetricStreamingDecoder) GetMetric() {
panic("don't use GetMetric, use Metric directly")
}
func (*MetricStreamingDecoder) GetLabel() {
panic("don't use GetLabel, use Label instead")
}
// unsafeLabelAdder adds labels for a single metric.
// The word "unsafe" highlights that the passed strings must not be retained on the
// caller's side. When used with labels.ScratchBuilder, ensure SetUnsafeAdd is set
// to true.
type unsafeLabelAdder interface {
Add(name, value string)
}
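// A minimal sketch of a retaining implementation (illustrative only, not part of
// this package's API): copy both strings before storing them, since the decoder
// reuses their backing memory across iterations.
//
//	type copyingAdder struct{ pairs []LabelPair }
//
//	func (c *copyingAdder) Add(name, value string) {
//		c.pairs = append(c.pairs, LabelPair{
//			Name:  strings.Clone(name), // strings.Clone makes a safe copy.
//			Value: strings.Clone(value),
//		})
//	}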
// Label parses labels into the given unsafeLabelAdder. The metric name is absent in
// the protobuf metric model and has to be deduced from the metric family name.
//
// TODO: The Label method name intentionally hides the MetricStreamingDecoder.Metric.Label
// field to avoid direct use (it's not parsed). In the future, the generator will produce
// structs tailored for streaming decoding.
//
// Unsafe in this context means that bytes and strings are reused across iterations.
// They are only valid until the next NextMetric() or NextMetricFamily() call.
func (m *MetricStreamingDecoder) Label(b unsafeLabelAdder) error {
for _, l := range m.labels {
if err := parseLabel(m.mData[l.start:l.end], b); err != nil {
return err
}
}
return nil
}
// parseLabel is essentially LabelPair.Unmarshal, but adds directly into an unsafeLabelAdder.
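//
// Wire-format recap: each field is encoded as a varint key (fieldNum<<3 | wireType)
// followed by its payload; Name (field 1) and Value (field 2) both use wire type 2
// (length-delimited), i.e. a varint length followed by that many bytes.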
func parseLabel(dAtA []byte, b unsafeLabelAdder) error {
var unsafeName, unsafeValue string
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return errors.New("proto: LabelPair: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
unsafeName = yoloString(dAtA[iNdEx:postIndex])
if !model.UTF8Validation.IsValidLabelName(unsafeName) {
return fmt.Errorf("invalid label name: %s", unsafeName)
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
unsafeValue = yoloString(dAtA[iNdEx:postIndex])
if !utf8.ValidString(unsafeValue) {
return fmt.Errorf("invalid label value: %s", unsafeValue)
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
b.Add(unsafeName, unsafeValue)
return nil
}
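// yoloString reinterprets b as a string without copying. The result aliases b's
// memory, so it must not be retained once the underlying buffer is reused (see the
// "unsafe" contracts above).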
func yoloString(b []byte) string {
return unsafe.String(unsafe.SliceData(b), len(b))
}
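// pos records a [start, end) byte range within the buffer currently being decoded.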
type pos struct {
start, end int
}
func (m *Metric) unmarshalWithoutLabels(p *MetricStreamingDecoder, dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return errors.New("proto: Metric: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
p.labels = append(p.labels, pos{start: iNdEx, end: postIndex})
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Gauge == nil {
m.Gauge = &Gauge{}
}
if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Counter == nil {
m.Counter = &Counter{}
}
if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Summary == nil {
m.Summary = &Summary{}
}
if err := m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Untyped == nil {
m.Untyped = &Untyped{}
}
if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType)
}
m.TimestampMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.TimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Histogram == nil {
m.Histogram = &Histogram{}
}
if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *MetricFamily) unmarshalWithoutMetrics(buf *MetricStreamingDecoder, dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return errors.New("proto: MetricFamily: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = yoloString(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Help = yoloString(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= MetricType(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
buf.metrics = append(buf.metrics, pos{start: iNdEx, end: postIndex})
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Unit = yoloString(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/prompb/io/prometheus/client/decoder_test.go | prompb/io/prometheus/client/decoder_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package io_prometheus_client //nolint:revive
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/rand"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/pool"
)
const (
testGauge = `name: "go_build_info"
help: "Build information about the main Go module."
type: GAUGE
metric: <
label: <
name: "checksum"
value: ""
>
label: <
name: "path"
value: "github.com/prometheus/client_golang"
>
label: <
name: "version"
value: "(devel)"
>
gauge: <
value: 1
>
>
metric: <
label: <
name: "checksum"
value: ""
>
label: <
name: "path"
value: "github.com/prometheus/prometheus"
>
label: <
name: "version"
value: "v3.0.0"
>
gauge: <
value: 2
>
>
`
testCounter = `name: "go_memstats_alloc_bytes_total"
help: "Total number of bytes allocated, even if freed."
type: COUNTER
unit: "bytes"
metric: <
counter: <
value: 1.546544e+06
exemplar: <
label: <
name: "dummyID"
value: "42"
>
value: 12
timestamp: <
seconds: 1625851151
nanos: 233181499
>
>
>
>
`
)
func TestMetricStreamingDecoder(t *testing.T) {
varintBuf := make([]byte, binary.MaxVarintLen32)
buf := bytes.Buffer{}
for _, m := range []string{testGauge, testCounter} {
mf := &MetricFamily{}
require.NoError(t, proto.UnmarshalText(m, mf))
// From proto message to binary protobuf.
protoBuf, err := proto.Marshal(mf)
require.NoError(t, err)
// Write first length, then binary protobuf.
varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
buf.Write(varintBuf[:varintLength])
buf.Write(protoBuf)
}
d := NewMetricStreamingDecoder(buf.Bytes())
require.NoError(t, d.NextMetricFamily())
nextFn := func() error {
for {
err := d.NextMetric()
if errors.Is(err, io.EOF) {
if err := d.NextMetricFamily(); err != nil {
return err
}
continue
}
return err
}
}
var firstMetricLset labels.Labels
{
require.NoError(t, nextFn())
require.Equal(t, "go_build_info", d.GetName())
require.Equal(t, "Build information about the main Go module.", d.GetHelp())
require.Equal(t, MetricType_GAUGE, d.GetType())
require.Equal(t, float64(1), d.GetGauge().GetValue())
b := labels.NewScratchBuilder(0)
require.NoError(t, d.Label(&b))
firstMetricLset = b.Labels()
require.Equal(t, `{checksum="", path="github.com/prometheus/client_golang", version="(devel)"}`, firstMetricLset.String())
}
{
require.NoError(t, nextFn())
require.Equal(t, "go_build_info", d.GetName())
require.Equal(t, "Build information about the main Go module.", d.GetHelp())
require.Equal(t, MetricType_GAUGE, d.GetType())
require.Equal(t, float64(2), d.GetGauge().GetValue())
b := labels.NewScratchBuilder(0)
require.NoError(t, d.Label(&b))
require.Equal(t, `{checksum="", path="github.com/prometheus/prometheus", version="v3.0.0"}`, b.Labels().String())
}
{
// Different mf now.
require.NoError(t, nextFn())
require.Equal(t, "go_memstats_alloc_bytes_total", d.GetName())
require.Equal(t, "Total number of bytes allocated, even if freed.", d.GetHelp())
require.Equal(t, "bytes", d.GetUnit())
require.Equal(t, MetricType_COUNTER, d.GetType())
require.Equal(t, 1.546544e+06, d.Metric.GetCounter().GetValue())
b := labels.NewScratchBuilder(0)
b.SetUnsafeAdd(true)
require.NoError(t, d.Label(&b))
require.Equal(t, `{}`, b.Labels().String())
}
require.Equal(t, io.EOF, nextFn())
// Expect labels and metricBytes to be static and reusable even after parsing.
require.Equal(t, `{checksum="", path="github.com/prometheus/client_golang", version="(devel)"}`, firstMetricLset.String())
}
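// encodeDelimited is an illustrative helper (an assumption for this sketch; not used
// by the test above, which inlines the same framing): it encodes a MetricFamily the
// way MetricStreamingDecoder expects it, i.e. a uvarint length prefix followed by
// the binary protobuf message.
func encodeDelimited(t *testing.T, mf *MetricFamily) []byte {
t.Helper()
// Encode the message itself first to learn its length.
protoBuf, err := proto.Marshal(mf)
require.NoError(t, err)
// Prepend the uvarint-encoded byte length of the message.
varintBuf := make([]byte, binary.MaxVarintLen32)
n := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
return append(varintBuf[:n], protoBuf...)
}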
// Regression test against https://github.com/prometheus/prometheus/pull/16946
func TestMetricStreamingDecoder_LabelsCorruption(t *testing.T) {
lastScrapeSize := 0
var allPreviousLabels []labels.Labels
buffers := pool.New(128, 1024, 2, func(sz int) any { return make([]byte, 0, sz) })
builder := labels.NewScratchBuilder(0)
builder.SetUnsafeAdd(true)
for _, labelsCount := range []int{1, 2, 3, 5, 8, 5, 3, 2, 1} {
// Get buffer from pool like in scrape.go
b := buffers.Get(lastScrapeSize).([]byte)
buf := bytes.NewBuffer(b)
// Generate some scraped data to parse
mf := &MetricFamily{}
data := generateMetricFamilyText(labelsCount)
require.NoError(t, proto.UnmarshalText(data, mf))
protoBuf, err := proto.Marshal(mf)
require.NoError(t, err)
sizeBuf := make([]byte, binary.MaxVarintLen32)
sizeBufSize := binary.PutUvarint(sizeBuf, uint64(len(protoBuf)))
buf.Write(sizeBuf[:sizeBufSize])
buf.Write(protoBuf)
// Use decoder like protobufparse.go would
b = buf.Bytes()
d := NewMetricStreamingDecoder(b)
require.NoError(t, d.NextMetricFamily())
require.NoError(t, d.NextMetric())
// Get the labels. The decoder reuses string memory when adding labels. We
// test that ScratchBuilder with unsafeAdd set to true handles that.
builder.Reset()
require.NoError(t, d.Label(&builder))
lbs := builder.Labels()
allPreviousLabels = append(allPreviousLabels, lbs)
// Validate all labels seen so far remain valid and not corrupted
for _, l := range allPreviousLabels {
require.True(t, l.IsValid(model.LegacyValidation), "encountered corrupted labels: %v", l)
}
lastScrapeSize = len(b)
buffers.Put(b)
}
}
func generateLabels() string {
randomName := fmt.Sprintf("instance_%d", rand.Intn(1000))
randomValue := fmt.Sprintf("value_%d", rand.Intn(1000))
return fmt.Sprintf(`label: <
name: "%s"
value: "%s"
>`, randomName, randomValue)
}
func generateMetricFamilyText(labelsCount int) string {
randomName := fmt.Sprintf("metric_%d", rand.Intn(1000))
randomHelp := fmt.Sprintf("Test metric to demonstrate forced corruption %d.", rand.Intn(1000))
var sb strings.Builder
for range labelsCount {
sb.WriteString(generateLabels())
}
labels10 := sb.String()
return fmt.Sprintf(`name: "%s"
help: "%s"
type: GAUGE
metric: <
%s
gauge: <
value: 1.0
>
>
`, randomName, randomHelp, labels10)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/querier_test.go | tsdb/querier_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"math/rand"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/oklog/ulid/v2"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/testutil"
)
// TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet.
type mockSeriesSet struct {
next func() bool
series func() storage.Series
ws func() annotations.Annotations
err func() error
}
func (m *mockSeriesSet) Next() bool { return m.next() }
func (m *mockSeriesSet) At() storage.Series { return m.series() }
func (m *mockSeriesSet) Err() error { return m.err() }
func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.ws() }
func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
i := -1
return &mockSeriesSet{
next: func() bool {
i++
return i < len(list)
},
series: func() storage.Series {
return list[i]
},
err: func() error { return nil },
ws: func() annotations.Annotations { return nil },
}
}
type mockChunkSeriesSet struct {
next func() bool
series func() storage.ChunkSeries
ws func() annotations.Annotations
err func() error
}
func (m *mockChunkSeriesSet) Next() bool { return m.next() }
func (m *mockChunkSeriesSet) At() storage.ChunkSeries { return m.series() }
func (m *mockChunkSeriesSet) Err() error { return m.err() }
func (m *mockChunkSeriesSet) Warnings() annotations.Annotations { return m.ws() }
func newMockChunkSeriesSet(list []storage.ChunkSeries) *mockChunkSeriesSet {
i := -1
return &mockChunkSeriesSet{
next: func() bool {
i++
return i < len(list)
},
series: func() storage.ChunkSeries {
return list[i]
},
err: func() error { return nil },
ws: func() annotations.Annotations { return nil },
}
}
type seriesSamples struct {
lset map[string]string
chunks [][]sample
}
// Index: labels -> postings -> chunkMetas -> chunkRef.
// ChunkReader: ref -> vals.
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) {
sort.Slice(tc, func(i, j int) bool {
return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[j].lset)) < 0
})
postings := index.NewMemPostings()
chkReader := mockChunkReader(make(map[chunks.ChunkRef]chunkenc.Chunk))
lblIdx := make(map[string]map[string]struct{})
mi := newMockIndex()
blockMint := int64(math.MaxInt64)
blockMaxt := int64(math.MinInt64)
var chunkRef chunks.ChunkRef
for i, s := range tc {
i++ // 0 is not a valid posting.
metas := make([]chunks.Meta, 0, len(s.chunks))
for _, chk := range s.chunks {
if chk[0].t < blockMint {
blockMint = chk[0].t
}
if chk[len(chk)-1].t > blockMaxt {
blockMaxt = chk[len(chk)-1].t
}
metas = append(metas, chunks.Meta{
MinTime: chk[0].t,
MaxTime: chk[len(chk)-1].t,
Ref: chunkRef,
})
switch {
case chk[0].fh != nil:
chunk := chunkenc.NewFloatHistogramChunk()
app, _ := chunk.Appender()
for _, smpl := range chk {
require.NotNil(t, smpl.fh, "chunk can only contain one type of sample")
_, _, _, err := app.AppendFloatHistogram(nil, smpl.t, smpl.fh, true)
require.NoError(t, err, "chunk should be appendable")
}
chkReader[chunkRef] = chunk
case chk[0].h != nil:
chunk := chunkenc.NewHistogramChunk()
app, _ := chunk.Appender()
for _, smpl := range chk {
require.NotNil(t, smpl.h, "chunk can only contain one type of sample")
_, _, _, err := app.AppendHistogram(nil, smpl.t, smpl.h, true)
require.NoError(t, err, "chunk should be appendable")
}
chkReader[chunkRef] = chunk
default:
chunk := chunkenc.NewXORChunk()
app, _ := chunk.Appender()
for _, smpl := range chk {
require.Nil(t, smpl.h, "chunk can only contain one type of sample")
require.Nil(t, smpl.fh, "chunk can only contain one type of sample")
app.Append(smpl.t, smpl.f)
}
chkReader[chunkRef] = chunk
}
chunkRef++
}
ls := labels.FromMap(s.lset)
require.NoError(t, mi.AddSeries(storage.SeriesRef(i), ls, metas...))
postings.Add(storage.SeriesRef(i), ls)
ls.Range(func(l labels.Label) {
vs, present := lblIdx[l.Name]
if !present {
vs = map[string]struct{}{}
lblIdx[l.Name] = vs
}
vs[l.Value] = struct{}{}
})
}
require.NoError(t, postings.Iter(func(l labels.Label, p index.Postings) error {
return mi.WritePostings(l.Name, l.Value, p)
}))
return mi, chkReader, blockMint, blockMaxt
}
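// A typical invocation (an illustrative sketch; the seriesSamples literal and its
// values are assumptions, not taken from the tests below):
//
//	ir, cr, mint, maxt := createIdxChkReaders(t, []seriesSamples{
//		{lset: map[string]string{"a": "a"}, chunks: [][]sample{{{t: 1, f: 2}, {t: 2, f: 3}}}},
//	})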
type blockQuerierTestCase struct {
mint, maxt int64
ms []*labels.Matcher
hints *storage.SelectHints
exp storage.SeriesSet
expChks storage.ChunkSeriesSet
}
func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr ChunkReader, stones *tombstones.MemTombstones) {
t.Run("sample", func(t *testing.T) {
q := blockQuerier{
blockBaseQuerier: &blockBaseQuerier{
index: ir,
chunks: cr,
tombstones: stones,
mint: c.mint,
maxt: c.maxt,
},
}
res := q.Select(context.Background(), false, c.hints, c.ms...)
defer func() { require.NoError(t, q.Close()) }()
for {
eok, rok := c.exp.Next(), res.Next()
require.Equal(t, eok, rok)
if !eok {
require.Empty(t, res.Warnings())
break
}
sexp := c.exp.At()
sres := res.At()
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
require.NoError(t, res.Err())
})
t.Run("chunk", func(t *testing.T) {
q := blockChunkQuerier{
blockBaseQuerier: &blockBaseQuerier{
index: ir,
chunks: cr,
tombstones: stones,
mint: c.mint,
maxt: c.maxt,
},
}
res := q.Select(context.Background(), false, c.hints, c.ms...)
defer func() { require.NoError(t, q.Close()) }()
for {
eok, rok := c.expChks.Next(), res.Next()
require.Equal(t, eok, rok)
if !eok {
require.Empty(t, res.Warnings())
break
}
sexpChks := c.expChks.At()
sres := res.At()
require.Equal(t, sexpChks.Labels(), sres.Labels())
chksExp, errExp := storage.ExpandChunks(sexpChks.Iterator(nil))
rmChunkRefs(chksExp)
chksRes, errRes := storage.ExpandChunks(sres.Iterator(nil))
rmChunkRefs(chksRes)
require.Equal(t, errExp, errRes)
require.Len(t, chksRes, len(chksExp))
var exp, act [][]chunks.Sample
for i := range chksExp {
samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
require.NoError(t, err)
exp = append(exp, samples)
samples, err = storage.ExpandSamples(chksRes[i].Chunk.Iterator(nil), nil)
require.NoError(t, err)
act = append(act, samples)
}
require.Equal(t, exp, act)
}
require.NoError(t, res.Err())
})
}
func TestBlockQuerier(t *testing.T) {
for _, c := range []blockQuerierTestCase{
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: 1,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: math.MinInt64,
maxt: math.MaxInt64,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "x")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: math.MinInt64,
maxt: math.MaxInt64,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("b", "b"),
[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
),
}),
},
{
mint: 2,
maxt: 6,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
},
{
// This test runs a query disabling trimming. All chunks containing at least 1 sample within the queried
// time range will be returned.
mint: 2,
maxt: 6,
hints: &storage.SelectHints{Start: 2, End: 6, DisableTrimming: true},
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}},
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
},
{
// This test runs a query disabling trimming. All chunks containing at least 1 sample within the queried
// time range will be returned.
mint: 5,
maxt: 6,
hints: &storage.SelectHints{Start: 5, End: 6, DisableTrimming: true},
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
},
} {
t.Run("", func(t *testing.T) {
ir, cr, _, _ := createIdxChkReaders(t, testData)
testBlockQuerier(t, c, ir, cr, tombstones.NewMemTombstones())
})
}
}
func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
for _, c := range []blockQuerierTestCase{
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: 1,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: math.MinInt64,
maxt: math.MaxInt64,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "x")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: math.MinInt64,
maxt: math.MaxInt64,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("b", "b"),
[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
),
}),
},
{
mint: 2,
maxt: 6,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
},
} {
t.Run("", func(t *testing.T) {
opts := DefaultHeadOptions()
opts.ChunkRange = 2 * time.Hour.Milliseconds()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(t, err)
defer h.Close()
app := h.Appender(context.Background())
for _, s := range testData {
for _, chk := range s.chunks {
for _, sample := range chk {
_, err = app.Append(0, labels.FromMap(s.lset), sample.t, sample.f)
require.NoError(t, err)
}
}
}
require.NoError(t, app.Commit())
hr := NewRangeHead(h, c.mint, c.maxt)
ir, err := hr.Index()
require.NoError(t, err)
defer ir.Close()
cr, err := hr.Chunks()
require.NoError(t, err)
defer cr.Close()
testBlockQuerier(t, c, ir, cr, tombstones.NewMemTombstones())
})
}
}
func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) {
ctx := context.Background()
c := blockQuerierTestCase{
mint: 2,
maxt: 6,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
}
ir, cr, _, _ := createIdxChkReaders(t, testData)
stones := tombstones.NewMemTombstones()
p, err := ir.Postings(ctx, "a", "a")
require.NoError(t, err)
refs, err := index.ExpandPostings(p)
require.NoError(t, err)
for _, ref := range refs {
stones.AddInterval(ref, tombstones.Interval{Mint: 1, Maxt: 2})
}
testBlockQuerier(t, c, ir, cr, stones)
for _, ref := range refs {
intervals, err := stones.Get(ref)
require.NoError(t, err)
// Without a copy, trimming could have mutated the intervals to [math.MinInt64, 2].
require.Equal(t, tombstones.Intervals{{Mint: 1, Maxt: 2}}, intervals)
}
}
var testData = []seriesSamples{
{
lset: map[string]string{"a": "a"},
chunks: [][]sample{
{{1, 2, nil, nil}, {2, 3, nil, nil}, {3, 4, nil, nil}},
{{5, 2, nil, nil}, {6, 3, nil, nil}, {7, 4, nil, nil}},
},
},
{
lset: map[string]string{"a": "a", "b": "b"},
chunks: [][]sample{
{{1, 1, nil, nil}, {2, 2, nil, nil}, {3, 3, nil, nil}},
{{5, 3, nil, nil}, {6, 6, nil, nil}},
},
},
{
lset: map[string]string{"b": "b"},
chunks: [][]sample{
{{1, 3, nil, nil}, {2, 2, nil, nil}, {3, 6, nil, nil}},
{{5, 1, nil, nil}, {6, 7, nil, nil}, {7, 2, nil, nil}},
},
},
}
func TestBlockQuerierDelete(t *testing.T) {
stones := tombstones.NewTestMemTombstones([]tombstones.Intervals{
{{Mint: 1, Maxt: 3}},
{{Mint: 1, Maxt: 3}, {Mint: 6, Maxt: 10}},
{{Mint: 6, Maxt: 10}},
})
for _, c := range []blockQuerierTestCase{
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: 1,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: math.MinInt64,
maxt: math.MaxInt64,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "x")},
exp: newMockSeriesSet([]storage.Series{}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
},
{
mint: math.MinInt64,
maxt: math.MaxInt64,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{5, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("b", "b"),
[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{5, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}},
),
}),
},
{
mint: 2,
maxt: 6,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{5, 3, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]chunks.Sample{sample{5, 3, nil, nil}},
),
}),
},
} {
t.Run("", func(t *testing.T) {
ir, cr, _, _ := createIdxChkReaders(t, testData)
testBlockQuerier(t, c, ir, cr, stones)
})
}
}
type fakeChunksReader struct {
ChunkReader
chks map[chunks.ChunkRef]chunkenc.Chunk
iterables map[chunks.ChunkRef]chunkenc.Iterable
}
func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
f := &fakeChunksReader{
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
iterables: map[chunks.ChunkRef]chunkenc.Iterable{},
}
chks := make([]chunks.Meta, 0, len(s))
for ref, samples := range s {
chk, _ := chunks.ChunkFromSamples(samples)
f.chks[chunks.ChunkRef(ref)] = chk.Chunk
chks = append(chks, chunks.Meta{
Ref: chunks.ChunkRef(ref),
MinTime: chk.MinTime,
MaxTime: chk.MaxTime,
})
}
return f, chks
}
// Samples in each slice are assumed to be sorted.
func createFakeReaderAndIterables(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
f := &fakeChunksReader{
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
iterables: map[chunks.ChunkRef]chunkenc.Iterable{},
}
chks := make([]chunks.Meta, 0, len(s))
for ref, samples := range s {
f.iterables[chunks.ChunkRef(ref)] = &mockIterable{s: samples}
var minTime, maxTime int64
if len(samples) > 0 {
minTime = samples[0].T()
maxTime = samples[len(samples)-1].T()
}
chks = append(chks, chunks.Meta{
Ref: chunks.ChunkRef(ref),
MinTime: minTime,
MaxTime: maxTime,
})
}
return f, chks
}
func (r *fakeChunksReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
if chk, ok := r.chks[meta.Ref]; ok {
return chk, nil, nil
}
if it, ok := r.iterables[meta.Ref]; ok {
return nil, it, nil
}
return nil, nil, fmt.Errorf("chunk or iterable not found at ref %v", meta.Ref)
}
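// Illustrative sketch (not part of the upstream file): ChunkOrIterable checks
// the chks map first and falls back to iterables, so a reader built by
// createFakeReaderAndIterables always resolves to an iterable rather than a
// concrete chunk.
func exampleFakeReaderResolution() error {
r, metas := createFakeReaderAndIterables([]chunks.Sample{sample{1, 1, nil, nil}})
chk, it, err := r.ChunkOrIterable(metas[0])
if err != nil {
return err
}
if chk != nil || it == nil {
return fmt.Errorf("expected an iterable for ref %v", metas[0].Ref)
}
return nil
}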
type mockIterable struct {
s []chunks.Sample
}
func (it *mockIterable) Iterator(chunkenc.Iterator) chunkenc.Iterator {
return &mockSampleIterator{
s: it.s,
idx: -1,
}
}
type mockSampleIterator struct {
s []chunks.Sample
idx int
}
func (it *mockSampleIterator) Seek(t int64) chunkenc.ValueType {
for ; it.idx < len(it.s); it.idx++ {
if it.idx != -1 && it.s[it.idx].T() >= t {
return it.s[it.idx].Type()
}
}
return chunkenc.ValNone
}
func (it *mockSampleIterator) At() (int64, float64) {
return it.s[it.idx].T(), it.s[it.idx].F()
}
func (it *mockSampleIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
return it.s[it.idx].T(), it.s[it.idx].H()
}
func (it *mockSampleIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
return it.s[it.idx].T(), it.s[it.idx].FH()
}
func (it *mockSampleIterator) AtT() int64 {
return it.s[it.idx].T()
}
func (it *mockSampleIterator) Next() chunkenc.ValueType {
if it.idx < len(it.s)-1 {
it.idx++
return it.s[it.idx].Type()
}
return chunkenc.ValNone
}
func (*mockSampleIterator) Err() error { return nil }
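// Illustrative sketch (not part of the upstream file): the mock iterator
// starts at idx == -1, so Next must be called before At, and Seek advances to
// the first sample with a timestamp >= t.
func exampleMockSampleIterator() {
it := (&mockIterable{s: []chunks.Sample{sample{1, 10, nil, nil}, sample{3, 30, nil, nil}}}).Iterator(nil)
for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
t, v := it.At() // Yields (1, 10), then (3, 30).
_, _ = t, v
}
}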
func TestPopulateWithTombSeriesIterators(t *testing.T) {
type minMaxTimes struct {
minTime, maxTime int64
}
cases := []struct {
name string
samples [][]chunks.Sample
expected []chunks.Sample
expectedChks []chunks.Meta
expectedMinMaxTimes []minMaxTimes
intervals tombstones.Intervals
// Seek being zero means do not test seek.
seek int64
seekSuccess bool
// Set this to true if a sample slice will form multiple chunks.
skipChunkTest bool
skipIterableTest bool
}{
{
name: "no chunk",
samples: [][]chunks.Sample{},
},
{
name: "one empty chunk", // This should never happen.
samples: [][]chunks.Sample{{}},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{}),
},
expectedMinMaxTimes: []minMaxTimes{{0, 0}},
// Iterables with no samples return no chunks instead of empty chunks.
skipIterableTest: true,
},
{
name: "one empty iterable",
samples: [][]chunks.Sample{{}},
// Iterables with no samples return no chunks.
expectedChks: nil,
skipChunkTest: true,
},
{
name: "three empty chunks", // This should never happen.
samples: [][]chunks.Sample{{}, {}, {}},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{}),
assureChunkFromSamples(t, []chunks.Sample{}),
assureChunkFromSamples(t, []chunks.Sample{}),
},
expectedMinMaxTimes: []minMaxTimes{{0, 0}, {0, 0}, {0, 0}},
// Iterables with no samples return no chunks instead of empty chunks.
skipIterableTest: true,
},
{
name: "three empty iterables",
samples: [][]chunks.Sample{{}, {}, {}},
// Iterables with no samples return no chunks.
expectedChks: nil,
skipChunkTest: true,
},
{
name: "one chunk",
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
},
expected: []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
},
{
name: "two full chunks",
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},
expected: []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
},
{
name: "three full chunks",
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
{sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}},
},
expected: []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
},
// Seek cases.
{
name: "three empty chunks and seek", // This should never happen.
samples: [][]chunks.Sample{{}, {}, {}},
seek: 1,
seekSuccess: false,
},
{
name: "two chunks and seek beyond chunks",
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},
seek: 10,
seekSuccess: false,
},
{
name: "two chunks and seek on middle of first chunk",
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},
seek: 2,
seekSuccess: true,
expected: []chunks.Sample{
sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
},
},
{
name: "two chunks and seek before first chunk",
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},
seek: -32,
seekSuccess: true,
expected: []chunks.Sample{
sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
},
},
// Deletion / Trim cases.
{
name: "no chunk with deletion interval",
samples: [][]chunks.Sample{},
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/ooo_head.go | tsdb/ooo_head.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"sort"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
// OOOChunk maintains samples in time-ascending order.
// Inserts for timestamps already seen are dropped.
// Samples are stored uncompressed to allow easy sorting.
// Perhaps we can be more efficient later.
type OOOChunk struct {
samples []sample
}
func NewOOOChunk() *OOOChunk {
return &OOOChunk{samples: make([]sample, 0, 4)}
}
// Insert inserts the sample such that order is maintained.
// Returns false if insert was not possible due to the same timestamp already existing.
func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) bool {
// Although out-of-order samples may also be unordered amongst themselves, we
// expect them to usually arrive roughly in order, so first try the cheap path
// of appending at the end when the new timestamp is higher than the last
// known timestamp.
if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t {
o.samples = append(o.samples, sample{t, v, h, fh})
return true
}
// Find the index of the first sample with a timestamp >= t.
i := sort.Search(len(o.samples), func(i int) bool { return o.samples[i].t >= t })
if i >= len(o.samples) {
// None found, so append at the end.
o.samples = append(o.samples, sample{t, v, h, fh})
return true
}
// Duplicate sample for timestamp is not allowed.
if o.samples[i].t == t {
return false
}
// Expand the slice by one to make room. A zero sample is fine; it is overwritten below.
o.samples = append(o.samples, sample{})
copy(o.samples[i+1:], o.samples[i:])
o.samples[i] = sample{t, v, h, fh}
return true
}
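// Illustrative sketch (not part of the upstream file): Insert keeps the
// samples time-ordered and drops a sample whose timestamp already exists.
func exampleOOOChunkInsert() {
chk := NewOOOChunk()
chk.Insert(5, 1.0, nil, nil) // Appended at the end: the chunk was empty.
chk.Insert(3, 2.0, nil, nil) // Inserted before t=5 via sort.Search.
ok := chk.Insert(5, 9.0, nil, nil) // false: t=5 is already present.
_ = ok
_ = chk.NumSamples() // 2
}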
func (o *OOOChunk) NumSamples() int {
return len(o.samples)
}
// ToEncodedChunks returns chunks with the samples in the OOOChunk.
//
//nolint:revive
func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) {
if len(o.samples) == 0 {
return nil, nil
}
// The most common case is a single chunk containing samples of one type; for float samples this is always the case.
chks = make([]memChunk, 0, 1)
var (
cmint int64
cmaxt int64
chunk chunkenc.Chunk
app chunkenc.Appender
)
prevEncoding := chunkenc.EncNone // We could ask the chunk for its encoding, but tracking it locally is cheaper.
for _, s := range o.samples {
if s.t < mint {
continue
}
if s.t > maxt {
break
}
encoding := chunkenc.EncXOR
if s.h != nil {
encoding = chunkenc.EncHistogram
} else if s.fh != nil {
encoding = chunkenc.EncFloatHistogram
}
// prevApp is the appender for the previous sample.
prevApp := app
if encoding != prevEncoding { // Always true for the first sample, since EncNone differs from every concrete encoding.
if prevEncoding != chunkenc.EncNone {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
}
cmint = s.t
switch encoding {
case chunkenc.EncXOR:
chunk = chunkenc.NewXORChunk()
case chunkenc.EncHistogram:
chunk = chunkenc.NewHistogramChunk()
case chunkenc.EncFloatHistogram:
chunk = chunkenc.NewFloatHistogramChunk()
default:
chunk = chunkenc.NewXORChunk()
}
app, err = chunk.Appender()
if err != nil {
return chks, err
}
}
switch encoding {
case chunkenc.EncXOR:
app.Append(s.t, s.f)
case chunkenc.EncHistogram:
// Ignoring the ok value is fine: if the type assertion fails, prevHApp is nil, and we must not compare against an appender of the wrong type anyway.
prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
var (
newChunk chunkenc.Chunk
recoded bool
)
newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
cmint = s.t
}
chunk = newChunk
}
case chunkenc.EncFloatHistogram:
// Ignoring the ok value is fine: if the type assertion fails, prevHApp is nil, and we must not compare against an appender of the wrong type anyway.
prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender)
var (
newChunk chunkenc.Chunk
recoded bool
)
newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
cmint = s.t
}
chunk = newChunk
}
}
cmaxt = s.t
prevEncoding = encoding
}
if prevEncoding != chunkenc.EncNone {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
}
return chks, nil
}
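// Illustrative sketch (not part of the upstream file): a change in sample type
// forces ToEncodedChunks to cut a new chunk, so two floats followed by a
// histogram yield one XOR chunk and one histogram chunk (assuming h != nil).
func exampleToEncodedChunks(h *histogram.Histogram) ([]memChunk, error) {
chk := NewOOOChunk()
chk.Insert(1, 1.0, nil, nil)
chk.Insert(2, 2.0, nil, nil)
chk.Insert(3, 0, h, nil) // Encoding switches from EncXOR to EncHistogram here.
return chk.ToEncodedChunks(1, 3) // Both bounds are inclusive for samples.
}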
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/block_test.go | tsdb/block_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"math/rand"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"testing"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/wlog"
)
// In Prometheus 2.1.0 we had a bug where the meta.json version was falsely bumped
// to 2. We had a migration in place resetting it to 1 but we should move immediately to
// version 3 next time to avoid confusion and issues.
func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
dir := t.TempDir()
_, err := writeMetaFile(promslog.NewNopLogger(), dir, &BlockMeta{})
require.NoError(t, err)
meta, _, err := readMetaFile(dir)
require.NoError(t, err)
require.NotEqual(t, 2, meta.Version, "meta.json version must never be 2")
}
func TestSetCompactionFailed(t *testing.T) {
tmpdir := t.TempDir()
blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
b, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
require.False(t, b.meta.Compaction.Failed)
require.NoError(t, b.setCompactionFailed())
require.True(t, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
b, err = OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
require.True(t, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
}
func TestCreateBlock(t *testing.T) {
tmpdir := t.TempDir()
b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil, nil)
require.NoError(t, err)
require.NoError(t, b.Close())
}
func BenchmarkOpenBlock(b *testing.B) {
tmpdir := b.TempDir()
blockDir := createBlock(b, tmpdir, genSeries(1e6, 20, 0, 10))
b.Run("benchmark", func(b *testing.B) {
for b.Loop() {
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(b, err)
require.NoError(b, block.Close())
}
})
}
func TestCorruptedChunk(t *testing.T) {
for _, tc := range []struct {
name string
corrFunc func(f *os.File) // Func that applies the corruption.
openErr error
iterErr error
}{
{
name: "invalid header size",
corrFunc: func(f *os.File) {
require.NoError(t, f.Truncate(1))
},
openErr: errors.New("invalid segment header in segment 0: invalid size"),
},
{
name: "invalid magic number",
corrFunc: func(f *os.File) {
magicChunksOffset := int64(0)
_, err := f.Seek(magicChunksOffset, 0)
require.NoError(t, err)
// Set invalid magic number.
b := make([]byte, chunks.MagicChunksSize)
binary.BigEndian.PutUint32(b[:chunks.MagicChunksSize], 0x00000000)
n, err := f.Write(b)
require.NoError(t, err)
require.Equal(t, chunks.MagicChunksSize, n)
},
openErr: errors.New("invalid magic number 0"),
},
{
name: "invalid chunk format version",
corrFunc: func(f *os.File) {
chunksFormatVersionOffset := int64(4)
_, err := f.Seek(chunksFormatVersionOffset, 0)
require.NoError(t, err)
// Set invalid chunk format version.
b := make([]byte, chunks.ChunksFormatVersionSize)
b[0] = 0
n, err := f.Write(b)
require.NoError(t, err)
require.Equal(t, chunks.ChunksFormatVersionSize, n)
},
openErr: errors.New("invalid chunk format version 0"),
},
{
name: "chunk not enough bytes to read the chunk length",
corrFunc: func(f *os.File) {
// Truncate one byte after the segment header.
require.NoError(t, f.Truncate(chunks.SegmentHeaderSize+1))
},
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: segment doesn't include enough bytes to read the chunk size data field - required:13, available:9"),
},
{
name: "chunk not enough bytes to read the data",
corrFunc: func(f *os.File) {
fi, err := f.Stat()
require.NoError(t, err)
require.NoError(t, f.Truncate(fi.Size()-1))
},
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: segment doesn't include enough bytes to read the chunk - required:25, available:24"),
},
{
name: "checksum mismatch",
corrFunc: func(f *os.File) {
fi, err := f.Stat()
require.NoError(t, err)
// Get the chunk data end offset.
chkEndOffset := int(fi.Size()) - crc32.Size
// Seek to the last byte of chunk data and modify it.
_, err = f.Seek(int64(chkEndOffset-1), 0)
require.NoError(t, err)
n, err := f.Write([]byte("x"))
require.NoError(t, err)
require.Equal(t, 1, n)
},
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:231bddcf, actual:d85ad10d"),
},
} {
t.Run(tc.name, func(t *testing.T) {
tmpdir := t.TempDir()
series := storage.NewListSeries(labels.FromStrings("a", "b"), []chunks.Sample{sample{1, 1, nil, nil}})
blockDir := createBlock(t, tmpdir, []storage.Series{series})
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.NotEmpty(t, files, "No chunk created.")
f, err := os.OpenFile(files[0], os.O_RDWR, 0o666)
require.NoError(t, err)
// Apply corruption function.
tc.corrFunc(f)
require.NoError(t, f.Close())
// Check open err.
b, err := OpenBlock(nil, blockDir, nil, nil)
if tc.openErr != nil {
require.EqualError(t, err, tc.openErr.Error())
return
}
defer func() { require.NoError(t, b.Close()) }()
querier, err := NewBlockQuerier(b, 0, 1)
require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }()
set := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
// Check chunk errors during iter time.
require.True(t, set.Next())
it := set.At().Iterator(nil)
require.Equal(t, chunkenc.ValNone, it.Next())
require.EqualError(t, it.Err(), tc.iterErr.Error())
})
}
}
func sequenceFiles(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
func TestLabelValuesWithMatchers(t *testing.T) {
tmpdir := t.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series
for i := range 100 {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"tens", fmt.Sprintf("value%d", i/10),
"unique", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
blockDir := createBlock(t, tmpdir, seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.NotEmpty(t, files, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
defer func() { require.NoError(t, block.Close()) }()
indexReader, err := block.Index()
require.NoError(t, err)
defer func() { require.NoError(t, indexReader.Close()) }()
var uniqueWithout30s []string
for i := range 100 {
if i/10 != 3 {
uniqueWithout30s = append(uniqueWithout30s, fmt.Sprintf("value%d", i))
}
}
sort.Strings(uniqueWithout30s)
testCases := []struct {
name string
labelName string
matchers []*labels.Matcher
expectedValues []string
}{
{
name: "get tens based on unique id",
labelName: "tens",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "value35")},
expectedValues: []string{"value3"},
}, {
name: "get unique ids based on a ten",
labelName: "unique",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "tens", "value1")},
expectedValues: []string{"value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"},
}, {
name: "get tens by pattern matching on unique id",
labelName: "tens",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "unique", "value[5-7]5")},
expectedValues: []string{"value5", "value6", "value7"},
}, {
name: "get tens by matching for presence of unique label",
labelName: "tens",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "unique", "")},
expectedValues: []string{"value0", "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9"},
}, {
name: "get unique IDs based on tens not being equal to a certain value, while not empty",
labelName: "unique",
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchNotEqual, "tens", "value3"),
labels.MustNewMatcher(labels.MatchNotEqual, "tens", ""),
},
expectedValues: uniqueWithout30s,
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
actualValues, err := indexReader.SortedLabelValues(ctx, tt.labelName, nil, tt.matchers...)
require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues)
actualValues, err = indexReader.LabelValues(ctx, tt.labelName, nil, tt.matchers...)
sort.Strings(actualValues)
require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues)
})
}
}
func TestBlockQuerierReturnsSortedLabelValues(t *testing.T) {
tmpdir := t.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series
for i := 100; i > 0; i-- {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"__name__", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
blockDir := createBlock(t, tmpdir, seriesEntries)
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, block.Close()) })
q, err := newBlockBaseQuerier(block, 0, 100)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, q.Close()) })
res, _, err := q.LabelValues(ctx, "__name__", nil)
require.NoError(t, err)
require.True(t, slices.IsSorted(res))
}
// TestBlockSize ensures that the block size is calculated correctly.
func TestBlockSize(t *testing.T) {
tmpdir := t.TempDir()
var (
blockInit *Block
expSizeInit int64
blockDirInit string
err error
)
// Create a block and compare the reported size vs actual disk size.
{
blockDirInit = createBlock(t, tmpdir, genSeries(10, 1, 1, 100))
blockInit, err = OpenBlock(nil, blockDirInit, nil, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, blockInit.Close())
}()
expSizeInit = blockInit.Size()
actSizeInit, err := fileutil.DirSize(blockInit.Dir())
require.NoError(t, err)
require.Equal(t, expSizeInit, actSizeInit)
}
// Delete some series and check the sizes again.
{
require.NoError(t, blockInit.Delete(context.Background(), 1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")))
expAfterDelete := blockInit.Size()
require.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
actAfterDelete, err := fileutil.DirSize(blockDirInit)
require.NoError(t, err)
require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil)
require.NoError(t, err)
blockDirsAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
require.NoError(t, err)
require.Len(t, blockDirsAfterCompact, 1)
blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirsAfterCompact[0].String()), nil, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, blockAfterCompact.Close())
}()
expAfterCompact := blockAfterCompact.Size()
actAfterCompact, err := fileutil.DirSize(blockAfterCompact.Dir())
require.NoError(t, err)
require.Greater(t, actAfterDelete, actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
require.Equal(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
}
}
func TestReadIndexFormatV1(t *testing.T) {
/* The block here was produced at the commit
706602daed1487f7849990678b4ece4599745905 used in 2.0.0 with:
db, _ := Open("v1db", nil, nil, nil)
app := db.Appender()
app.Add(labels.FromStrings("foo", "bar"), 1, 2)
app.Add(labels.FromStrings("foo", "baz"), 3, 4)
app.Add(labels.FromStrings("foo", "meh"), 1000*3600*4, 4) // Not in the block.
// Make sure we've enough values for the lack of sorting of postings offsets to show up.
for i := 0; i < 100; i++ {
app.Add(labels.FromStrings("bar", strconv.FormatInt(int64(i), 10)), 0, 0)
}
app.Commit()
db.compact()
db.Close()
*/
blockDir := filepath.Join("testdata", "index_format_v1")
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
q, err := NewBlockQuerier(block, 0, 1000)
require.NoError(t, err)
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")))
q, err = NewBlockQuerier(block, 0, 1000)
require.NoError(t, err)
require.Equal(t, map[string][]chunks.Sample{
`{foo="bar"}`: {sample{t: 1, f: 2}},
`{foo="baz"}`: {sample{t: 3, f: 4}},
}, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")))
}
func BenchmarkLabelValuesWithMatchers(b *testing.B) {
tmpdir := b.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series
metricCount := 1000000
for i := range metricCount {
// Note these series are not created in sort order: 'value2' sorts after 'value10'.
// This makes a big difference to the benchmark timing.
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"a_unique", fmt.Sprintf("value%d", i),
"b_tens", fmt.Sprintf("value%d", i/(metricCount/10)),
"c_ninety", fmt.Sprintf("value%d", i/(metricCount/10)/9), // "0" for the first 90%, then "1"
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
blockDir := createBlock(b, tmpdir, seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(b, err)
require.NotEmpty(b, files, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(b, err)
defer func() { require.NoError(b, block.Close()) }()
indexReader, err := block.Index()
require.NoError(b, err)
defer func() { require.NoError(b, indexReader.Close()) }()
matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "c_ninety", "value0")}
b.ReportAllocs()
for b.Loop() {
actualValues, err := indexReader.LabelValues(ctx, "b_tens", nil, matchers...)
require.NoError(b, err)
require.Len(b, actualValues, 9)
}
}
func TestLabelNamesWithMatchers(t *testing.T) {
tmpdir := t.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series
for i := range 100 {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"unique", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
if i%10 == 0 {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"tens", fmt.Sprintf("value%d", i/10),
"unique", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
if i%20 == 0 {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"tens", fmt.Sprintf("value%d", i/10),
"twenties", fmt.Sprintf("value%d", i/20),
"unique", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
}
blockDir := createBlock(t, tmpdir, seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.NotEmpty(t, files, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, block.Close()) })
indexReader, err := block.Index()
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, indexReader.Close()) })
testCases := []struct {
name string
labelName string
matchers []*labels.Matcher
expectedNames []string
}{
{
name: "get with non-empty unique: all",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "unique", "")},
expectedNames: []string{"tens", "twenties", "unique"},
}, {
name: "get with unique ending in 1: only unique",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "unique", "value.*1")},
expectedNames: []string{"unique"},
}, {
name: "get with unique = value20: all",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "value20")},
expectedNames: []string{"tens", "twenties", "unique"},
}, {
name: "get tens = 1: unique & tens",
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "tens", "value1")},
expectedNames: []string{"tens", "unique"},
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
actualNames, err := indexReader.LabelNames(ctx, tt.matchers...)
require.NoError(t, err)
require.Equal(t, tt.expectedNames, actualNames)
})
}
}
func TestBlockIndexReader_PostingsForLabelMatching(t *testing.T) {
testPostingsForLabelMatching(t, 2, func(t *testing.T, series []labels.Labels) IndexReader {
var seriesEntries []storage.Series
for _, s := range series {
seriesEntries = append(seriesEntries, storage.NewListSeries(s, []chunks.Sample{sample{100, 0, nil, nil}}))
}
blockDir := createBlock(t, t.TempDir(), seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.NotEmpty(t, files, "No chunk created.")
block, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, block.Close()) })
ir, err := block.Index()
require.NoError(t, err)
return ir
})
}
func testPostingsForLabelMatching(t *testing.T, offset storage.SeriesRef, setUp func(*testing.T, []labels.Labels) IndexReader) {
t.Helper()
ctx := context.Background()
series := []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
}
ir := setUp(t, series)
t.Cleanup(func() {
require.NoError(t, ir.Close())
})
testCases := []struct {
name string
labelName string
match func(string) bool
exp []storage.SeriesRef
}{
{
name: "n=1",
labelName: "n",
match: func(val string) bool {
return val == "1"
},
exp: []storage.SeriesRef{offset + 1, offset + 2, offset + 3},
},
{
name: "n=2",
labelName: "n",
match: func(val string) bool {
return val == "2"
},
exp: []storage.SeriesRef{offset + 4},
},
{
name: "missing label",
labelName: "missing",
match: func(string) bool {
return true
},
exp: nil,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
p := ir.PostingsForLabelMatching(ctx, tc.labelName, tc.match)
require.NotNil(t, p)
srs, err := index.ExpandPostings(p)
require.NoError(t, err)
require.Equal(t, tc.exp, srs)
})
}
}
// createBlock creates a block with given set of series and returns its dir.
func createBlock(tb testing.TB, dir string, series []storage.Series) string {
blockDir, err := CreateBlock(series, dir, 0, promslog.NewNopLogger())
require.NoError(tb, err)
return blockDir
}
func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, nil)
require.NoError(tb, err)
require.NoError(tb, os.MkdirAll(dir, 0o777))
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this, a block's MaxTime is always one millisecond past the timestamp of the newest sample it contains.
ulids, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
require.NoError(tb, err)
require.Len(tb, ulids, 1)
return filepath.Join(dir, ulids[0].String())
}
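// Illustrative sketch (not part of the upstream file), assuming a head already
// populated with samples: because the interval is half-open, the block's
// MaxTime lands exactly one millisecond past the head's newest sample.
func exampleBlockIntervalFromHead(tb testing.TB, dir string, head *Head) {
blockDir := createBlockFromHead(tb, dir, head)
b, err := OpenBlock(nil, blockDir, nil, nil)
require.NoError(tb, err)
defer func() { require.NoError(tb, b.Close()) }()
require.Equal(tb, head.MaxTime()+1, b.Meta().MaxTime) // Exclusive upper bound.
}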
func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string {
compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, nil)
require.NoError(tb, err)
require.NoError(tb, os.MkdirAll(dir, 0o777))
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this, a block's MaxTime is always one millisecond past the timestamp of the newest sample it contains.
ulids, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
require.NoError(tb, err)
require.Len(tb, ulids, 1)
return filepath.Join(dir, ulids[0].String())
}
func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string) *Head {
opts := DefaultHeadOptions()
opts.ChunkDirRoot = chunkDir
head, err := NewHead(nil, nil, w, nil, opts, nil)
require.NoError(tb, err)
var it chunkenc.Iterator
ctx := context.Background()
app := head.Appender(ctx)
for _, s := range series {
ref := storage.SeriesRef(0)
it = s.Iterator(it)
lset := s.Labels()
typ := it.Next()
lastTyp := typ
for ; typ != chunkenc.ValNone; typ = it.Next() {
if lastTyp != typ {
// The behaviour of appender is undefined if samples of different types
// are appended to the same series in a single Commit().
require.NoError(tb, app.Commit())
app = head.Appender(ctx)
}
switch typ {
case chunkenc.ValFloat:
t, v := it.At()
ref, err = app.Append(ref, lset, t, v)
case chunkenc.ValHistogram:
t, h := it.AtHistogram(nil)
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
case chunkenc.ValFloatHistogram:
t, fh := it.AtFloatHistogram(nil)
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
default:
err = fmt.Errorf("unknown sample type %s", typ.String())
}
require.NoError(tb, err)
lastTyp = typ
}
require.NoError(tb, it.Err())
}
require.NoError(tb, app.Commit())
return head
}
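// Illustrative sketch (not part of the upstream file): when a series mixes
// floats and histograms, commit between type changes as createHead does, so
// each Commit carries only one sample type per series.
func exampleAppendMixedTypes(tb testing.TB, head *Head, lset labels.Labels, h *histogram.Histogram) {
ctx := context.Background()
app := head.Appender(ctx)
_, err := app.Append(0, lset, 1, 1.5)
require.NoError(tb, err)
require.NoError(tb, app.Commit()) // Flush the float before switching types.
app = head.Appender(ctx)
_, err = app.AppendHistogram(0, lset, 2, h, nil)
require.NoError(tb, err)
require.NoError(tb, app.Commit())
}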
func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string, oooSampleFrequency int) *Head {
opts := DefaultHeadOptions()
opts.ChunkDirRoot = chunkDir
opts.OutOfOrderTimeWindow.Store(10000000000)
head, err := NewHead(nil, nil, w, nil, opts, nil)
require.NoError(tb, err)
oooSampleLabels := make([]labels.Labels, 0, len(series))
oooSamples := make([]chunks.SampleSlice, 0, len(series))
var it chunkenc.Iterator
totalSamples := 0
app := head.Appender(context.Background())
for _, s := range series {
ref := storage.SeriesRef(0)
it = s.Iterator(it)
lset := s.Labels()
os := chunks.SampleSlice{}
count := 0
for it.Next() == chunkenc.ValFloat {
totalSamples++
count++
t, v := it.At()
if count%oooSampleFrequency == 0 {
os = append(os, sample{t: t, f: v})
continue
}
ref, err = app.Append(ref, lset, t, v)
require.NoError(tb, err)
}
require.NoError(tb, it.Err())
if len(os) > 0 {
oooSampleLabels = append(oooSampleLabels, lset)
oooSamples = append(oooSamples, os)
}
}
require.NoError(tb, app.Commit())
oooSamplesAppended := 0
require.Equal(tb, float64(0), prom_testutil.ToFloat64(head.metrics.outOfOrderSamplesAppended))
app = head.Appender(context.Background())
for i, lset := range oooSampleLabels {
ref := storage.SeriesRef(0)
for _, sample := range oooSamples[i] {
ref, err = app.Append(ref, lset, sample.T(), sample.F())
require.NoError(tb, err)
oooSamplesAppended++
}
}
require.NoError(tb, app.Commit())
actOOOAppended := prom_testutil.ToFloat64(head.metrics.outOfOrderSamplesAppended)
require.GreaterOrEqual(tb, actOOOAppended, float64(oooSamplesAppended-len(series)))
require.LessOrEqual(tb, actOOOAppended, float64(oooSamplesAppended))
require.Equal(tb, float64(totalSamples), prom_testutil.ToFloat64(head.metrics.samplesAppended))
return head
}
const (
defaultLabelName = "labelName"
defaultLabelValue = "labelValue"
)
// genSeries generates series of float64 samples with a given number of labels and values.
func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series {
return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) chunks.Sample {
return sample{t: ts, f: rand.Float64()}
})
}
// genHistogramSeries generates series of histogram samples with a given number of labels and values.
func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series {
return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) chunks.Sample {
h := &histogram.Histogram{
Count: 7 + uint64(ts*5),
ZeroCount: 2 + uint64(ts),
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
}
if ts != mint {
// By setting the counter reset hint to "no counter
// reset" for all histograms but the first, we cover the
// most common cases. If the series is manipulated later
// or spans more than one block when ingested into the
// storage, the hint has to be adjusted. Note that the
// storage itself treats this particular hint the same
// as "unknown".
h.CounterResetHint = histogram.NotCounterReset
}
if floatHistogram {
return sample{t: ts, fh: h.ToFloat(nil)}
}
return sample{t: ts, h: h}
})
}
// genHistogramAndFloatSeries generates series of mixed histogram and float64 samples with a given number of labels and values.
func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series {
floatSample := false
count := 0
return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) chunks.Sample {
count++
var s sample
if floatSample {
s = sample{t: ts, f: rand.Float64()}
} else {
h := &histogram.Histogram{
Count: 7 + uint64(ts*5),
ZeroCount: 2 + uint64(ts),
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
}
if count > 1 && count%5 != 1 {
// Same rationale for this as above in
// genHistogramSeries, just that we have to be
// smarter to find out if the previous sample
// was a histogram, too.
h.CounterResetHint = histogram.NotCounterReset
}
if floatHistogram {
s = sample{t: ts, fh: h.ToFloat(nil)}
} else {
s = sample{t: ts, h: h}
}
}
if count%5 == 0 {
// Flip the sample type for every 5 samples.
floatSample = !floatSample
}
return s
})
}
func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step int64, generator func(ts int64) chunks.Sample) []storage.Series {
if totalSeries == 0 || labelCount == 0 {
return nil
}
series := make([]storage.Series, totalSeries)
for i := range totalSeries {
lbls := make(map[string]string, labelCount)
lbls[defaultLabelName] = strconv.Itoa(i)
for j := 1; len(lbls) < labelCount; j++ {
lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
}
samples := make([]chunks.Sample, 0, (maxt-mint)/step+1)
for t := mint; t < maxt; t += step {
samples = append(samples, generator(t))
}
series[i] = storage.NewListSeries(labels.FromMap(lbls), samples)
}
return series
}
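// Illustrative sketch (not part of the upstream file): the generator runs once
// per step in the half-open range [mint, maxt), so this produces 10 series,
// each with samples at t = 0, 2, 4, 6, 8.
func exampleGenSeries() []storage.Series {
return genSeriesFromSampleGenerator(10, 3, 0, 10, 2, func(ts int64) chunks.Sample {
return sample{t: ts, f: float64(ts)}
})
}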
// populateSeries generates series from given labels, mint and maxt.
func populateSeries(lbls []map[string]string, mint, maxt int64) []storage.Series {
if len(lbls) == 0 {
return nil
}
series := make([]storage.Series, 0, len(lbls))
for _, lbl := range lbls {
if len(lbl) == 0 {
continue
}
samples := make([]chunks.Sample, 0, maxt-mint+1)
for t := mint; t <= maxt; t++ {
samples = append(samples, sample{t: t, f: rand.Float64()})
}
series = append(series, storage.NewListSeries(labels.FromMap(lbl), samples))
}
return series
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/block.go | tsdb/block.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"slices"
"sync"
"github.com/oklog/ulid/v2"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
// IndexWriter serializes the index for a block of series data.
// The methods must be called in the order they are specified in.
type IndexWriter interface {
// AddSymbol registers a single symbol.
// Symbols must be registered in sorted order.
AddSymbol(sym string) error
// AddSeries populates the index writer with a series and its offsets
// of chunks that the index can reference.
// Implementations may require series to be inserted in strictly increasing order by
// their labels. The reference numbers are used to resolve entries in postings lists
// that are added later.
AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error
// Close writes any finalization and closes the resources associated with
// the underlying writer.
Close() error
}
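// writeIndexInOrder is an illustrative sketch (added for documentation, not
// part of the upstream code) of the ordering contract above: register all
// symbols first, in sorted order, then add series in increasing label order,
// and finally Close. The caller is assumed to have pre-sorted both inputs.
func writeIndexInOrder(w IndexWriter, sortedSymbols []string, refs []storage.SeriesRef, sortedSeries []labels.Labels, chks [][]chunks.Meta) error {
	for _, sym := range sortedSymbols {
		if err := w.AddSymbol(sym); err != nil {
			return err
		}
	}
	for i, lset := range sortedSeries {
		if err := w.AddSeries(refs[i], lset, chks[i]...); err != nil {
			return err
		}
	}
	return w.Close()
}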
// IndexReader provides reading access of serialized index data.
type IndexReader interface {
// Symbols returns an iterator over sorted string symbols that may occur in
// series' labels and indices. It is not safe to use the returned strings
// beyond the lifetime of the index reader.
Symbols() index.StringIter
// SortedLabelValues returns sorted possible label values.
SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error)
// LabelValues returns possible label values which may not be sorted.
LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error)
// Postings returns the postings list iterator for the label pairs.
// The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g.
// during background garbage collections.
Postings(ctx context.Context, name string, values ...string) (index.Postings, error)
// PostingsForLabelMatching returns a sorted iterator over postings having a label with the given name and a value for which match returns true.
// If no postings are found having at least one matching label, an empty iterator is returned.
PostingsForLabelMatching(ctx context.Context, name string, match func(value string) bool) index.Postings
// PostingsForAllLabelValues returns a sorted iterator over all postings having a label with the given name.
// If no postings are found with the label in question, an empty iterator is returned.
PostingsForAllLabelValues(ctx context.Context, name string) index.Postings
// SortedPostings returns a postings list that is reordered to be sorted
// by the label set of the underlying series.
SortedPostings(index.Postings) index.Postings
// ShardedPostings returns a postings list filtered by the provided shardIndex
// out of shardCount. For a given posting, its shard MUST be computed hashing
// the series labels mod shardCount, using a hash function which is consistent over time.
ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings
// Series populates the given builder and chunk metas for the series identified
// by the reference.
// Returns storage.ErrNotFound if the ref does not resolve to a known series.
Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error
// LabelNames returns all the unique label names present in the index in sorted order.
LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error)
// LabelNamesFor returns all the label names for the series referred to by the postings.
// The names returned are sorted.
LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error)
// Close releases the underlying resources of the reader.
Close() error
}
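// iterateSeries is an illustrative sketch (added for documentation, not part
// of the upstream code) of the usual IndexReader read path: resolve postings
// for one label pair, then decode each series' labels and chunk metas via
// Series, reusing the scratch builder and chunk meta slice across calls.
func iterateSeries(ctx context.Context, ir IndexReader, name, value string) error {
	p, err := ir.Postings(ctx, name, value)
	if err != nil {
		return err
	}
	var builder labels.ScratchBuilder
	var chks []chunks.Meta
	for p.Next() {
		if err := ir.Series(p.At(), &builder, &chks); err != nil {
			return err
		}
		// builder.Labels() and chks now describe one series.
	}
	return p.Err()
}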
// ChunkWriter serializes a time block of chunked series data.
type ChunkWriter interface {
// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
// must be populated.
// After returning successfully, the Ref fields in the ChunkMetas
// are set and can be used to retrieve the chunks from the written data.
WriteChunks(chunks ...chunks.Meta) error
// Close writes any required finalization and closes the resources
// associated with the underlying writer.
Close() error
}
// ChunkReader provides reading access of serialized time series data.
type ChunkReader interface {
// ChunkOrIterable returns the series data for the given chunks.Meta.
// Either a single chunk will be returned, or an iterable.
// A single chunk should be returned if chunks.Meta maps to a chunk that
// already exists and doesn't need modifications.
// An iterable should be returned if chunks.Meta maps to a subset of the
// samples in a stored chunk, or multiple chunks. (E.g. OOOHeadChunkReader
// could return an iterable where multiple histogram samples have counter
// resets. There can only be one counter reset per histogram chunk so
// multiple chunks would be created from the iterable in this case.)
// Only one of chunk or iterable should be returned. In some cases you may
// always expect a chunk to be returned. You can check that iterable is nil
// in those cases.
ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error)
// Close releases all underlying resources of the reader.
Close() error
}
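// iteratorForMeta is an illustrative sketch (added for documentation, not
// part of the upstream code) of the ChunkOrIterable contract above: exactly
// one of chunk and iterable is non-nil, and both can produce an iterator.
func iteratorForMeta(r ChunkReader, meta chunks.Meta) (chunkenc.Iterator, error) {
	chk, iterable, err := r.ChunkOrIterable(meta)
	if err != nil {
		return nil, err
	}
	if chk != nil {
		return chk.Iterator(nil), nil
	}
	return iterable.Iterator(nil), nil
}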
// BlockReader provides reading access to a data block.
type BlockReader interface {
// Index returns an IndexReader over the block's data.
Index() (IndexReader, error)
// Chunks returns a ChunkReader over the block's data.
Chunks() (ChunkReader, error)
// Tombstones returns a tombstones.Reader over the block's deleted data.
Tombstones() (tombstones.Reader, error)
// Meta provides meta information about the block reader.
Meta() BlockMeta
// Size returns the number of bytes that the block takes up on disk.
Size() int64
}
// BlockMeta provides meta information about a block.
type BlockMeta struct {
// Unique identifier for the block and its contents. Changes on compaction.
ULID ulid.ULID `json:"ulid"`
// MinTime and MaxTime specify the time range all samples
// in the block are in.
MinTime int64 `json:"minTime"`
MaxTime int64 `json:"maxTime"`
// Stats about the contents of the block.
Stats BlockStats `json:"stats,omitempty"`
// Information on compactions the block was created from.
Compaction BlockMetaCompaction `json:"compaction"`
// Version of the index format.
Version int `json:"version"`
}
// BlockStats contains stats about contents of a block.
type BlockStats struct {
NumSamples uint64 `json:"numSamples,omitempty"`
NumFloatSamples uint64 `json:"numFloatSamples,omitempty"`
NumHistogramSamples uint64 `json:"numHistogramSamples,omitempty"`
NumSeries uint64 `json:"numSeries,omitempty"`
NumChunks uint64 `json:"numChunks,omitempty"`
NumTombstones uint64 `json:"numTombstones,omitempty"`
}
// BlockDesc describes a block by ULID and time range.
type BlockDesc struct {
ULID ulid.ULID `json:"ulid"`
MinTime int64 `json:"minTime"`
MaxTime int64 `json:"maxTime"`
}
// BlockMetaCompaction holds information about compactions a block went through.
type BlockMetaCompaction struct {
// Maximum number of compaction cycles any source block has
// gone through.
Level int `json:"level"`
// ULIDs of all source head blocks that went into the block.
Sources []ulid.ULID `json:"sources,omitempty"`
// Indicates that compaction resulted in a block without any samples,
// so it should be deleted on the next reloadBlocks.
Deletable bool `json:"deletable,omitempty"`
// Short descriptions of the direct blocks that were used to create
// this block.
Parents []BlockDesc `json:"parents,omitempty"`
Failed bool `json:"failed,omitempty"`
// Additional information about the compaction, for example, block created from out-of-order chunks.
Hints []string `json:"hints,omitempty"`
}
func (bm *BlockMetaCompaction) SetOutOfOrder() {
if bm.FromOutOfOrder() {
return
}
bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder)
slices.Sort(bm.Hints)
}
func (bm *BlockMetaCompaction) FromOutOfOrder() bool {
return slices.Contains(bm.Hints, CompactionHintFromOutOfOrder)
}
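// For illustration (added note, not upstream code): SetOutOfOrder is
// idempotent and round-trips through FromOutOfOrder:
//
//	var bm BlockMetaCompaction
//	bm.SetOutOfOrder()
//	bm.SetOutOfOrder()      // no duplicate hint is appended
//	_ = bm.FromOutOfOrder() // true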
const (
indexFilename = "index"
metaFilename = "meta.json"
metaVersion1 = 1
// CompactionHintFromOutOfOrder is a hint noting that the block
// was created from out-of-order chunks.
CompactionHintFromOutOfOrder = "from-out-of-order"
)
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
func readMetaFile(dir string) (*BlockMeta, int64, error) {
b, err := os.ReadFile(filepath.Join(dir, metaFilename))
if err != nil {
return nil, 0, err
}
var m BlockMeta
if err := json.Unmarshal(b, &m); err != nil {
return nil, 0, err
}
if m.Version != metaVersion1 {
return nil, 0, fmt.Errorf("unexpected meta file version %d", m.Version)
}
return &m, int64(len(b)), nil
}
func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, error) {
meta.Version = metaVersion1
// Make any changes to the file appear atomic.
path := filepath.Join(dir, metaFilename)
tmp := path + ".tmp"
defer func() {
if err := os.RemoveAll(tmp); err != nil {
logger.Error("remove tmp file", "err", err.Error())
}
}()
f, err := os.Create(tmp)
if err != nil {
return 0, err
}
jsonMeta, err := json.MarshalIndent(meta, "", "\t")
if err != nil {
return 0, err
}
n, err := f.Write(jsonMeta)
if err != nil {
return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
}
// Force the kernel to persist the file on disk to avoid data loss if the host crashes.
if err := f.Sync(); err != nil {
return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
}
if err := f.Close(); err != nil {
return 0, err
}
return int64(n), fileutil.Replace(tmp, path)
}
// Block represents a directory of time series data covering a continuous time range.
type Block struct {
mtx sync.RWMutex
closing bool
pendingReaders sync.WaitGroup
dir string
meta BlockMeta
// Symbol Table Size in bytes.
// We maintain this variable to avoid recalculation every time.
symbolTableSize uint64
chunkr ChunkReader
indexr IndexReader
tombstones tombstones.Reader
logger *slog.Logger
numBytesChunks int64
numBytesIndex int64
numBytesTombstone int64
numBytesMeta int64
}
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
// to instantiate chunk structs.
func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool, postingsDecoderFactory PostingsDecoderFactory) (pb *Block, err error) {
if logger == nil {
logger = promslog.NewNopLogger()
}
var closers []io.Closer
defer func() {
if err != nil {
err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err()
}
}()
meta, sizeMeta, err := readMetaFile(dir)
if err != nil {
return nil, err
}
cr, err := chunks.NewDirReader(chunkDir(dir), pool)
if err != nil {
return nil, err
}
closers = append(closers, cr)
decoder := index.DecodePostingsRaw
if postingsDecoderFactory != nil {
decoder = postingsDecoderFactory(meta)
}
ir, err := index.NewFileReader(filepath.Join(dir, indexFilename), decoder)
if err != nil {
return nil, err
}
closers = append(closers, ir)
tr, sizeTomb, err := tombstones.ReadTombstones(dir)
if err != nil {
return nil, err
}
closers = append(closers, tr)
pb = &Block{
dir: dir,
meta: *meta,
chunkr: cr,
indexr: ir,
tombstones: tr,
symbolTableSize: ir.SymbolTableSize(),
logger: logger,
numBytesChunks: cr.Size(),
numBytesIndex: ir.Size(),
numBytesTombstone: sizeTomb,
numBytesMeta: sizeMeta,
}
return pb, nil
}
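// openBlockForRead is an illustrative sketch (added for documentation, not
// part of the upstream code) of typical OpenBlock usage: a fresh chunk pool,
// the default postings decoder via a nil factory, and a guaranteed Close.
func openBlockForRead(logger *slog.Logger, dir string) (BlockMeta, error) {
	b, err := OpenBlock(logger, dir, chunkenc.NewPool(), nil)
	if err != nil {
		return BlockMeta{}, err
	}
	// Close error ignored for brevity in this sketch.
	defer b.Close()
	return b.Meta(), nil
}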
// Close closes the on-disk block. It blocks as long as there are readers reading from the block.
func (pb *Block) Close() error {
pb.mtx.Lock()
pb.closing = true
pb.mtx.Unlock()
pb.pendingReaders.Wait()
return tsdb_errors.NewMulti(
pb.chunkr.Close(),
pb.indexr.Close(),
pb.tombstones.Close(),
).Err()
}
func (pb *Block) String() string {
return pb.meta.ULID.String()
}
// Dir returns the directory of the block.
func (pb *Block) Dir() string { return pb.dir }
// Meta returns meta information about the block.
func (pb *Block) Meta() BlockMeta { return pb.meta }
// MinTime returns the min time of the meta.
func (pb *Block) MinTime() int64 { return pb.meta.MinTime }
// MaxTime returns the max time of the meta.
func (pb *Block) MaxTime() int64 { return pb.meta.MaxTime }
// Size returns the number of bytes that the block takes up.
func (pb *Block) Size() int64 {
return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta
}
// ErrClosing is returned when a block is in the process of being closed.
var ErrClosing = errors.New("block is closing")
func (pb *Block) startRead() error {
pb.mtx.RLock()
defer pb.mtx.RUnlock()
if pb.closing {
return ErrClosing
}
pb.pendingReaders.Add(1)
return nil
}
// Index returns a new IndexReader against the block data.
func (pb *Block) Index() (IndexReader, error) {
if err := pb.startRead(); err != nil {
return nil, err
}
return blockIndexReader{ir: pb.indexr, b: pb}, nil
}
// Chunks returns a new ChunkReader against the block data.
func (pb *Block) Chunks() (ChunkReader, error) {
if err := pb.startRead(); err != nil {
return nil, err
}
return blockChunkReader{ChunkReader: pb.chunkr, b: pb}, nil
}
// Tombstones returns a new TombstoneReader against the block data.
func (pb *Block) Tombstones() (tombstones.Reader, error) {
if err := pb.startRead(); err != nil {
return nil, err
}
return blockTombstoneReader{Reader: pb.tombstones, b: pb}, nil
}
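// labelNamesFromBlock is an illustrative sketch (added for documentation, not
// part of the upstream code) of the reader lifecycle: every reader returned
// by Index, Chunks or Tombstones holds a pending-reader reference on the
// Block, so it must be closed or Block.Close will wait forever.
func labelNamesFromBlock(ctx context.Context, pb *Block) ([]string, error) {
	ir, err := pb.Index()
	if err != nil {
		return nil, err
	}
	// Releases the pending-reader reference taken by Index.
	defer ir.Close()
	return ir.LabelNames(ctx)
}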
// GetSymbolTableSize returns the Symbol Table Size in the index of this block.
func (pb *Block) GetSymbolTableSize() uint64 {
return pb.symbolTableSize
}
func (pb *Block) setCompactionFailed() error {
pb.meta.Compaction.Failed = true
n, err := writeMetaFile(pb.logger, pb.dir, &pb.meta)
if err != nil {
return err
}
pb.numBytesMeta = n
return nil
}
type blockIndexReader struct {
ir IndexReader
b *Block
}
func (r blockIndexReader) Symbols() index.StringIter {
return r.ir.Symbols()
}
func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
var st []string
var err error
if len(matchers) == 0 {
st, err = r.ir.SortedLabelValues(ctx, name, hints)
} else {
st, err = r.LabelValues(ctx, name, hints, matchers...)
if err == nil {
slices.Sort(st)
}
}
if err != nil {
return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err)
}
return st, nil
}
func (r blockIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) == 0 {
st, err := r.ir.LabelValues(ctx, name, hints)
if err != nil {
return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err)
}
return st, nil
}
return labelValuesWithMatchers(ctx, r.ir, name, hints, matchers...)
}
func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) == 0 {
return r.b.LabelNames(ctx)
}
return labelNamesWithMatchers(ctx, r.ir, matchers...)
}
func (r blockIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
p, err := r.ir.Postings(ctx, name, values...)
if err != nil {
return p, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err)
}
return p, nil
}
func (r blockIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings {
return r.ir.PostingsForLabelMatching(ctx, name, match)
}
func (r blockIndexReader) PostingsForAllLabelValues(ctx context.Context, name string) index.Postings {
return r.ir.PostingsForAllLabelValues(ctx, name)
}
func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
return r.ir.SortedPostings(p)
}
func (r blockIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
return r.ir.ShardedPostings(p, shardIndex, shardCount)
}
func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
if err := r.ir.Series(ref, builder, chks); err != nil {
return fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err)
}
return nil
}
func (r blockIndexReader) Close() error {
r.b.pendingReaders.Done()
return nil
}
// LabelNamesFor returns all the label names for the series referred to by the postings.
// The names returned are sorted.
func (r blockIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
return r.ir.LabelNamesFor(ctx, postings)
}
type blockTombstoneReader struct {
tombstones.Reader
b *Block
}
func (r blockTombstoneReader) Close() error {
r.b.pendingReaders.Done()
return nil
}
type blockChunkReader struct {
ChunkReader
b *Block
}
func (r blockChunkReader) Close() error {
r.b.pendingReaders.Done()
return nil
}
// Delete matching series between mint and maxt in the block.
func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
pb.mtx.Lock()
defer pb.mtx.Unlock()
if pb.closing {
return ErrClosing
}
p, err := PostingsForMatchers(ctx, pb.indexr, ms...)
if err != nil {
return fmt.Errorf("select series: %w", err)
}
ir := pb.indexr
// Choose only valid postings which have chunks in the time-range.
stones := tombstones.NewMemTombstones()
var chks []chunks.Meta
var builder labels.ScratchBuilder
Outer:
for p.Next() {
err := ir.Series(p.At(), &builder, &chks)
if err != nil {
return err
}
for _, chk := range chks {
if chk.OverlapsClosedInterval(mint, maxt) {
// Clamp the deleted interval to the time range actually covered by this
// series' chunks; do not delete beyond the current values.
tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
stones.AddInterval(p.At(), tombstones.Interval{Mint: tmin, Maxt: tmax})
continue Outer
}
}
}
if p.Err() != nil {
return p.Err()
}
err = pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error {
for _, iv := range ivs {
stones.AddInterval(id, iv)
}
return nil
})
if err != nil {
return err
}
pb.tombstones = stones
pb.meta.Stats.NumTombstones = pb.tombstones.Total()
n, err := tombstones.WriteFile(pb.logger, pb.dir, pb.tombstones)
if err != nil {
return err
}
pb.numBytesTombstone = n
n, err = writeMetaFile(pb.logger, pb.dir, &pb.meta)
if err != nil {
return err
}
pb.numBytesMeta = n
return nil
}
// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
// If there was a rewrite, it returns the ULIDs of the new blocks written, else nil.
// If a resultant block is empty (tombstones covered the whole block), it returns an empty slice.
// It also returns a boolean indicating whether the parent block can be deleted safely.
func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) {
numStones := 0
if err := pb.tombstones.Iter(func(_ storage.SeriesRef, ivs tombstones.Intervals) error {
numStones += len(ivs)
return nil
}); err != nil {
// This should never happen, as the iteration function only returns nil.
panic(err)
}
if numStones == 0 {
return nil, false, nil
}
meta := pb.Meta()
uids, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
if err != nil {
return nil, false, err
}
return uids, true, nil
}
// Snapshot creates snapshot of the block into dir.
func (pb *Block) Snapshot(dir string) error {
blockDir := filepath.Join(dir, pb.meta.ULID.String())
if err := os.MkdirAll(blockDir, 0o777); err != nil {
return fmt.Errorf("create snapshot block dir: %w", err)
}
chunksDir := chunkDir(blockDir)
if err := os.MkdirAll(chunksDir, 0o777); err != nil {
return fmt.Errorf("create snapshot chunk dir: %w", err)
}
// Hardlink meta, index and tombstones
for _, fname := range []string{
metaFilename,
indexFilename,
tombstones.TombstonesFilename,
} {
if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil {
return fmt.Errorf("create snapshot %s: %w", fname, err)
}
}
// Hardlink the chunks
curChunkDir := chunkDir(pb.dir)
files, err := os.ReadDir(curChunkDir)
if err != nil {
return fmt.Errorf("ReadDir the current chunk dir: %w", err)
}
for _, f := range files {
err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name()))
if err != nil {
return fmt.Errorf("hardlink a chunk: %w", err)
}
}
return nil
}
// OverlapsClosedInterval returns true if the block overlaps [mint, maxt].
func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool {
// The block itself is a half-open interval
// [pb.meta.MinTime, pb.meta.MaxTime).
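// For illustration (added note): a block covering [0, 100) overlaps the
// closed query interval [99, 120], but not [100, 120], because the
// block's MaxTime itself is excluded.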
return pb.meta.MinTime <= maxt && mint < pb.meta.MaxTime
}
// LabelNames returns all the unique label names present in the Block in sorted order.
func (pb *Block) LabelNames(ctx context.Context) ([]string, error) {
return pb.indexr.LabelNames(ctx)
}
func clampInterval(a, b, mint, maxt int64) (int64, int64) {
if a < mint {
a = mint
}
if b > maxt {
b = maxt
}
return a, b
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/ooo_isolation_test.go | tsdb/ooo_isolation_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestOOOIsolation(t *testing.T) {
i := newOOOIsolation()
// Empty state shouldn't have any open reads.
require.False(t, i.HasOpenReadsAtOrBefore(0))
require.False(t, i.HasOpenReadsAtOrBefore(1))
require.False(t, i.HasOpenReadsAtOrBefore(2))
require.False(t, i.HasOpenReadsAtOrBefore(3))
// Add a read.
read1 := i.TrackReadAfter(1)
require.False(t, i.HasOpenReadsAtOrBefore(0))
require.False(t, i.HasOpenReadsAtOrBefore(1))
require.True(t, i.HasOpenReadsAtOrBefore(2))
// Add another overlapping read.
read2 := i.TrackReadAfter(0)
require.False(t, i.HasOpenReadsAtOrBefore(0))
require.True(t, i.HasOpenReadsAtOrBefore(1))
require.True(t, i.HasOpenReadsAtOrBefore(2))
// Close the second read, should now only report open reads for the first read's ref.
read2.Close()
require.False(t, i.HasOpenReadsAtOrBefore(0))
require.False(t, i.HasOpenReadsAtOrBefore(1))
require.True(t, i.HasOpenReadsAtOrBefore(2))
// Close the second read again: this should do nothing and ensures we can safely call Close() multiple times.
read2.Close()
require.False(t, i.HasOpenReadsAtOrBefore(0))
require.False(t, i.HasOpenReadsAtOrBefore(1))
require.True(t, i.HasOpenReadsAtOrBefore(2))
// Closing the first read should indicate no further open reads.
read1.Close()
require.False(t, i.HasOpenReadsAtOrBefore(0))
require.False(t, i.HasOpenReadsAtOrBefore(1))
require.False(t, i.HasOpenReadsAtOrBefore(2))
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_read_test.go | tsdb/head_read_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
)
// TestMemSeries_chunk runs a series of tests on memSeries.chunk() calls.
// It will simulate various conditions to ensure all code paths in that function are covered.
func TestMemSeries_chunk(t *testing.T) {
const chunkRange int64 = 100
const chunkStep int64 = 5
appendSamples := func(t *testing.T, s *memSeries, start, end int64, cdm *chunks.ChunkDiskMapper) {
for i := start; i < end; i += chunkStep {
ok, _ := s.append(i, float64(i), 0, chunkOpts{
chunkDiskMapper: cdm,
chunkRange: chunkRange,
samplesPerChunk: DefaultSamplesPerChunk,
})
require.True(t, ok, "sample append failed")
}
}
type setupFn func(*testing.T, *memSeries, *chunks.ChunkDiskMapper)
type callOutput uint8
const (
outOpenHeadChunk callOutput = iota // memSeries.chunk() call returned memSeries.headChunks with headChunk=true & isOpen=true
outClosedHeadChunk // memSeries.chunk() call returned memSeries.headChunks with headChunk=true & isOpen=false
outMmappedChunk // memSeries.chunk() call returned a chunk from memSeries.mmappedChunks with headChunk=false
outErr // memSeries.chunk() call returned an error
)
tests := []struct {
name string
setup setupFn // optional function called just before the test memSeries.chunk() call
inputID chunks.HeadChunkID // requested chunk id for memSeries.chunk() call
expected callOutput
}{
{
name: "call ix=0 on empty memSeries",
inputID: 0,
expected: outErr,
},
{
name: "call ix=1 on empty memSeries",
inputID: 1,
expected: outErr,
},
{
name: "firstChunkID > ix",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.firstChunkID = 5
},
inputID: 1,
expected: outErr,
},
{
name: "call ix=0 on memSeries with no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 0,
expected: outOpenHeadChunk,
},
{
name: "call ix=1 on memSeries with no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 1,
expected: outErr,
},
{
name: "call ix=10 on memSeries with no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 10,
expected: outErr,
},
{
name: "call ix=0 on memSeries with 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 0,
expected: outMmappedChunk,
},
{
name: "call ix=1 on memSeries with 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 1,
expected: outMmappedChunk,
},
{
name: "call ix=3 on memSeries with 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 3,
expected: outOpenHeadChunk,
},
{
name: "call ix=0 on memSeries with 3 mmapped chunks and no headChunk",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.headChunks = nil
},
inputID: 0,
expected: outMmappedChunk,
},
{
name: "call ix=2 on memSeries with 3 mmapped chunks and no headChunk",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.headChunks = nil
},
inputID: 2,
expected: outMmappedChunk,
},
{
name: "call ix=3 on memSeries with 3 mmapped chunks and no headChunk",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.headChunks = nil
},
inputID: 3,
expected: outErr,
},
{
name: "call ix=1 on memSeries with 3 mmapped chunks and closed ChunkDiskMapper",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
cdm.Close()
},
inputID: 1,
expected: outErr,
},
{
name: "call ix=3 on memSeries with 3 mmapped chunks and closed ChunkDiskMapper",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
cdm.Close()
},
inputID: 3,
expected: outOpenHeadChunk,
},
{
name: "call ix=0 on memSeries with 3 head chunks and no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*3, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 0,
expected: outClosedHeadChunk,
},
{
name: "call ix=1 on memSeries with 3 head chunks and no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*3, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 1,
expected: outClosedHeadChunk,
},
{
name: "call ix=10 on memSeries with 3 head chunks and no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*3, cdm)
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 10,
expected: outErr,
},
{
name: "call ix=0 on memSeries with 3 head chunks and 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 0,
expected: outMmappedChunk,
},
{
name: "call ix=2 on memSeries with 3 head chunks and 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 2,
expected: outMmappedChunk,
},
{
name: "call ix=3 on memSeries with 3 head chunks and 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 3,
expected: outClosedHeadChunk,
},
{
name: "call ix=5 on memSeries with 3 head chunks and 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 5,
expected: outOpenHeadChunk,
},
{
name: "call ix=6 on memSeries with 3 head chunks and 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 6,
expected: outErr,
},
{
name: "call ix=10 on memSeries with 3 head chunks and 3 mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
inputID: 10,
expected: outErr,
},
}
memChunkPool := &sync.Pool{
New: func() any {
return &memChunk{}
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir()
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
require.NoError(t, err)
defer func() {
require.NoError(t, chunkDiskMapper.Close())
}()
series := newMemSeries(labels.EmptyLabels(), 1, 0, true, false)
if tc.setup != nil {
tc.setup(t, series, chunkDiskMapper)
}
chk, headChunk, isOpen, err := series.chunk(tc.inputID, chunkDiskMapper, memChunkPool)
switch tc.expected {
case outOpenHeadChunk:
require.NoError(t, err, "unexpected error")
require.True(t, headChunk, "expected a chunk with headChunk=true but got headChunk=%v", headChunk)
require.True(t, isOpen, "expected a chunk with isOpen=true but got isOpen=%v", isOpen)
case outClosedHeadChunk:
require.NoError(t, err, "unexpected error")
require.True(t, headChunk, "expected a chunk with headChunk=true but got headChunk=%v", headChunk)
require.False(t, isOpen, "expected a chunk with isOpen=false but got isOpen=%v", isOpen)
case outMmappedChunk:
require.NoError(t, err, "unexpected error")
require.False(t, headChunk, "expected a chunk with headChunk=false but got gc=%v", headChunk)
case outErr:
require.Nil(t, chk, "got a non-nil chunk reference returned with an error")
require.Error(t, err)
}
})
}
}
func TestHeadIndexReader_PostingsForLabelMatching(t *testing.T) {
testPostingsForLabelMatching(t, 0, func(t *testing.T, series []labels.Labels) IndexReader {
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = t.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, h.Close())
})
app := h.Appender(context.Background())
for _, s := range series {
app.Append(0, s, 0, 0)
}
require.NoError(t, app.Commit())
ir, err := h.Index()
require.NoError(t, err)
return ir
})
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_append_v2_test.go | tsdb/head_append_v2_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"fmt"
"io"
"math"
"math/rand"
"os"
"path"
"path/filepath"
"reflect"
"slices"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
)
// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632),
// for example:
// * TestChunkNotFoundHeadGCRace
// * TestHeadSeriesChunkRace
// * TestHeadLabelValuesWithMatchers
// * TestHeadLabelNamesWithMatchers
// * TestHeadShardedPostings
// TestHeadAppenderV2_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples.
// In total it generates 4000 chunks, because with a step of 15s there are 4 chunks per block per series.
// While appending the samples to the head, it concurrently queries them from multiple goroutines and verifies that the
// returned results are correct.
func TestHeadAppenderV2_HighConcurrencyReadAndWrite(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
seriesCnt := 1000
readConcurrency := 2
writeConcurrency := 10
startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
qryRange := uint64(5 * time.Minute.Milliseconds())
step := uint64(15 * time.Second / time.Millisecond)
endTs := startTs + uint64(DefaultBlockDuration)
labelSets := make([]labels.Labels, seriesCnt)
for i := range seriesCnt {
labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
}
head.Init(0)
g, ctx := errgroup.WithContext(context.Background())
whileNotCanceled := func(f func() (bool, error)) error {
for ctx.Err() == nil {
cont, err := f()
if err != nil {
return err
}
if !cont {
return nil
}
}
return nil
}
// Create one channel for each write worker; the channels will be used by the coordinator
// goroutine to coordinate which timestamps each write worker has to write.
writerTsCh := make([]chan uint64, writeConcurrency)
for writerTsChIdx := range writerTsCh {
writerTsCh[writerTsChIdx] = make(chan uint64)
}
// workerReadyWg is used to synchronize the start of the test,
// we only start the test once all workers signal that they're ready.
var workerReadyWg sync.WaitGroup
workerReadyWg.Add(writeConcurrency + readConcurrency)
// Start the write workers.
for wid := range writeConcurrency {
// Create copy of workerID to be used by worker routine.
workerID := wid
g.Go(func() error {
// The label sets which this worker will write.
workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
// Signal that this worker is ready.
workerReadyWg.Done()
return whileNotCanceled(func() (bool, error) {
ts, ok := <-writerTsCh[workerID]
if !ok {
return false, nil
}
app := head.AppenderV2(ctx)
for i := range workerLabelSets {
// We also use the timestamp as the sample value.
_, err := app.Append(0, workerLabelSets[i], 0, int64(ts), float64(ts), nil, nil, storage.AOptions{})
if err != nil {
return false, fmt.Errorf("Error when appending to head: %w", err)
}
}
return true, app.Commit()
})
})
}
// queryHead is a helper to query the head for a given time range and labelset.
queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
if err != nil {
return nil, err
}
return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
}
// readerTsCh will be used by the coordinator goroutine to coordinate which timestamps the readers should read.
readerTsCh := make(chan uint64)
// Start the read workers.
for wid := range readConcurrency {
// Create copy of threadID to be used by worker routine.
workerID := wid
g.Go(func() error {
querySeriesRef := (seriesCnt / readConcurrency) * workerID
// Signal that this worker is ready.
workerReadyWg.Done()
return whileNotCanceled(func() (bool, error) {
ts, ok := <-readerTsCh
if !ok {
return false, nil
}
querySeriesRef = (querySeriesRef + 1) % seriesCnt
lbls := labelSets[querySeriesRef]
// lbls has a single entry; extract it so we can run a query.
var lbl labels.Label
lbls.Range(func(l labels.Label) {
lbl = l
})
samples, err := queryHead(ts-qryRange, ts, lbl)
if err != nil {
return false, err
}
if len(samples) != 1 {
return false, fmt.Errorf("expected 1 series, got %d", len(samples))
}
series := lbls.String()
expectSampleCnt := qryRange/step + 1
if expectSampleCnt != uint64(len(samples[series])) {
return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
}
for sampleIdx, sample := range samples[series] {
expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
if sample.T() != int64(expectedValue) {
return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
}
if sample.F() != float64(expectedValue) {
return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
}
}
return true, nil
})
})
}
// Start the coordinator goroutine.
g.Go(func() error {
currTs := startTs
defer func() {
// End of the test, close all channels to stop the workers.
for _, ch := range writerTsCh {
close(ch)
}
close(readerTsCh)
}()
// Wait until all workers are ready to start the test.
workerReadyWg.Wait()
return whileNotCanceled(func() (bool, error) {
// Send the current timestamp to each of the writers.
for _, ch := range writerTsCh {
select {
case ch <- currTs:
case <-ctx.Done():
return false, nil
}
}
// Once data for at least <qryRange> has been ingested, send the current timestamp to the readers.
if currTs > startTs+qryRange {
select {
case readerTsCh <- currTs - step:
case <-ctx.Done():
return false, nil
}
}
currTs += step
if currTs > endTs {
return false, nil
}
return true, nil
})
})
require.NoError(t, g.Wait())
}
func TestHeadAppenderV2_WALMultiRef(t *testing.T) {
head, w := newTestHead(t, 1000, compression.None, false)
require.NoError(t, head.Init(0))
app := head.AppenderV2(context.Background())
ref1, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 100, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
// Add another sample outside chunk range to mmap a chunk.
app = head.AppenderV2(context.Background())
_, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 1500, 2, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
require.NoError(t, head.Truncate(1600))
app = head.AppenderV2(context.Background())
ref2, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 1700, 3, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 3.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
// Add another sample outside chunk range to mmap a chunk.
app = head.AppenderV2(context.Background())
_, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 2000, 4, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
require.NotEqual(t, ref1, ref2, "Refs are the same")
require.NoError(t, head.Close())
w, err = wlog.New(nil, nil, w.Dir(), compression.None)
require.NoError(t, err)
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = head.opts.ChunkDirRoot
head, err = NewHead(nil, nil, w, nil, opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0))
defer func() {
require.NoError(t, head.Close())
}()
q, err := NewBlockQuerier(head, 0, 2100)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
// The samples before the new ref should be discarded since Head truncation
// happens only after compacting the Head.
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {
sample{1700, 3, nil, nil},
sample{2000, 4, nil, nil},
}}, series)
}
func TestHeadAppenderV2_ActiveAppenders(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer head.Close()
require.NoError(t, head.Init(0))
// First rollback with no samples.
app := head.AppenderV2(context.Background())
require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
require.NoError(t, app.Rollback())
require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
// Then commit with no samples.
app = head.AppenderV2(context.Background())
require.NoError(t, app.Commit())
require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
// Now rollback with one sample.
app = head.AppenderV2(context.Background())
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 100, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
require.NoError(t, app.Rollback())
require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
// Now commit with one sample.
app = head.AppenderV2(context.Background())
_, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 100, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
}
func TestHeadAppenderV2_RaceBetweenSeriesCreationAndGC(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
const totalSeries = 100_000
series := make([]labels.Labels, totalSeries)
for i := range totalSeries {
series[i] = labels.FromStrings("foo", strconv.Itoa(i))
}
done := atomic.NewBool(false)
go func() {
defer done.Store(true)
app := head.AppenderV2(context.Background())
defer func() {
if err := app.Commit(); err != nil {
t.Errorf("Failed to commit: %v", err)
}
}()
for i := range totalSeries {
_, err := app.Append(0, series[i], 0, 100, 1, nil, nil, storage.AOptions{})
if err != nil {
t.Errorf("Failed to append: %v", err)
return
}
}
}()
// Don't check the atomic.Bool on all iterations in order to perform more gc iterations and make the race condition more likely.
for i := 1; i%128 != 0 || !done.Load(); i++ {
head.gc()
}
require.Equal(t, totalSeries, int(head.NumSeries()))
}
func TestHeadAppenderV2_CanGCSeriesCreatedWithoutSamples(t *testing.T) {
for op, finishTxn := range map[string]func(app storage.AppenderTransaction) error{
"after commit": func(app storage.AppenderTransaction) error { return app.Commit() },
"after rollback": func(app storage.AppenderTransaction) error { return app.Rollback() },
} {
t.Run(op, func(t *testing.T) {
chunkRange := time.Hour.Milliseconds()
head, _ := newTestHead(t, chunkRange, compression.None, true)
t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
firstSampleTime := 10 * chunkRange
{
// Append first sample, it should init head max time to firstSampleTime.
app := head.AppenderV2(context.Background())
_, err := app.Append(0, labels.FromStrings("lbl", "ok"), 0, firstSampleTime, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 1, int(head.NumSeries()))
}
// Append a sample in a time range that is not covered by the chunk range.
// This creates the series first and then fails to append any sample to it.
app := head.AppenderV2(context.Background())
invalidSampleTime := firstSampleTime - chunkRange
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, invalidSampleTime, 2, nil, nil, storage.AOptions{})
require.Error(t, err)
// These are our assumptions: we're not testing them, we're just checking them to make debugging a failed
// test easier if someone refactors the code and breaks these assumptions.
// If these assumptions fail after a refactor, feel free to remove them but make sure that the test is still what we intended to test.
require.NotErrorIs(t, err, storage.ErrOutOfBounds, "Failed to append sample shouldn't take the shortcut that returns storage.ErrOutOfBounds")
require.ErrorIs(t, err, storage.ErrTooOldSample, "Failed to append sample should return storage.ErrTooOldSample, because OOO window was enabled but this sample doesn't fall into it.")
// Do commit or rollback, depending on what we're testing.
require.NoError(t, finishTxn(app))
// Garbage-collect: since we finished the transaction and the series has no samples, it should be collectable.
head.gc()
require.Equal(t, 1, int(head.NumSeries()))
})
}
}
func TestHeadAppenderV2_DeleteSimple(t *testing.T) {
buildSmpls := func(s []int64) []sample {
ss := make([]sample, 0, len(s))
for _, t := range s {
ss = append(ss, sample{t: t, f: float64(t)})
}
return ss
}
smplsAll := buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
lblDefault := labels.Label{Name: "a", Value: "b"}
lblsDefault := labels.FromStrings("a", "b")
cases := []struct {
dranges tombstones.Intervals
addSamples []sample // Samples to add after delete.
smplsExp []sample
}{
{
dranges: tombstones.Intervals{{Mint: 0, Maxt: 3}},
smplsExp: buildSmpls([]int64{4, 5, 6, 7, 8, 9}),
},
{
dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}},
smplsExp: buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9}),
},
{
dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
smplsExp: buildSmpls([]int64{0, 8, 9}),
},
{
dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
smplsExp: buildSmpls([]int64{0}),
},
{ // This case is to ensure that labels and symbols are deleted.
dranges: tombstones.Intervals{{Mint: 0, Maxt: 9}},
smplsExp: buildSmpls([]int64{}),
},
{
dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}},
addSamples: buildSmpls([]int64{11, 13, 15}),
smplsExp: buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9, 11, 13, 15}),
},
{
// After delete, the appended samples in the deleted range should be visible
// as the tombstones are clamped to head min/max time.
dranges: tombstones.Intervals{{Mint: 7, Maxt: 20}},
addSamples: buildSmpls([]int64{11, 13, 15}),
smplsExp: buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 11, 13, 15}),
},
}
for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
for _, c := range cases {
head, w := newTestHead(t, 1000, compress, false)
require.NoError(t, head.Init(0))
app := head.AppenderV2(context.Background())
for _, smpl := range smplsAll {
_, err := app.Append(0, lblsDefault, 0, smpl.t, smpl.f, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Delete the ranges.
for _, r := range c.dranges {
require.NoError(t, head.Delete(context.Background(), r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)))
}
// Add more samples.
app = head.AppenderV2(context.Background())
for _, smpl := range c.addSamples {
_, err := app.Append(0, lblsDefault, 0, smpl.t, smpl.f, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Prepare a reloaded head so we can compare the samples for both heads - before and after the reloadBlocks.
reloadedW, err := wlog.New(nil, nil, w.Dir(), compress) // Use a new wal to ensure deleted samples are gone even after a reloadBlocks.
require.NoError(t, err)
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = reloadedW.Dir()
reloadedHead, err := NewHead(nil, nil, reloadedW, nil, opts, nil)
require.NoError(t, err)
require.NoError(t, reloadedHead.Init(0))
// Compare the query results for both heads - before and after the reloadBlocks.
Outer:
for _, h := range []*Head{head, reloadedHead} {
q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
require.NoError(t, err)
actSeriesSet := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
require.NoError(t, q.Close())
expSeriesSet := newMockSeriesSet([]storage.Series{
storage.NewListSeries(lblsDefault, func() []chunks.Sample {
ss := make([]chunks.Sample, 0, len(c.smplsExp))
for _, s := range c.smplsExp {
ss = append(ss, s)
}
return ss
}(),
),
})
for {
eok, rok := expSeriesSet.Next(), actSeriesSet.Next()
require.Equal(t, eok, rok)
if !eok {
require.NoError(t, h.Close())
require.NoError(t, actSeriesSet.Err())
require.Empty(t, actSeriesSet.Warnings())
continue Outer
}
expSeries := expSeriesSet.At()
actSeries := actSeriesSet.At()
require.Equal(t, expSeries.Labels(), actSeries.Labels())
smplExp, errExp := storage.ExpandSamples(expSeries.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(actSeries.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
}
}
})
}
}
func TestHeadAppenderV2_DeleteUntilCurrMax(t *testing.T) {
hb, _ := newTestHead(t, 1000000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
numSamples := int64(10)
app := hb.AppenderV2(context.Background())
smpls := make([]float64, numSamples)
for i := range numSamples {
smpls[i] = rand.Float64()
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
require.NoError(t, hb.Delete(context.Background(), 0, 10000, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
// Test that the series returns no samples. The series is cleared only after compaction.
q, err := NewBlockQuerier(hb, 0, 100000)
require.NoError(t, err)
res := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, res.Next(), "series is not present")
s := res.At()
it := s.Iterator(nil)
require.Equal(t, chunkenc.ValNone, it.Next(), "expected no samples")
for res.Next() {
}
require.NoError(t, res.Err())
require.Empty(t, res.Warnings())
// Add again and test for presence.
app = hb.AppenderV2(context.Background())
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 11, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
q, err = NewBlockQuerier(hb, 0, 100000)
require.NoError(t, err)
res = q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, res.Next(), "series don't exist")
exps := res.At()
it = exps.Iterator(nil)
resSamples, err := storage.ExpandSamples(it, newSample)
require.NoError(t, err)
require.Equal(t, []chunks.Sample{sample{11, 1, nil, nil}}, resSamples)
for res.Next() {
}
require.NoError(t, res.Err())
require.Empty(t, res.Warnings())
}
func TestHeadAppenderV2_DeleteSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
numSamples := 10000
// Enough samples to cause a checkpoint.
hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
for i := range numSamples {
app := hb.AppenderV2(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, int64(i), 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
}
require.NoError(t, hb.Delete(context.Background(), 0, int64(numSamples), labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
require.NoError(t, hb.Truncate(1))
require.NoError(t, hb.Close())
// Confirm there's been a checkpoint.
cdir, _, err := wlog.LastCheckpoint(w.Dir())
require.NoError(t, err)
// Read in checkpoint and WAL.
recs := readTestWAL(t, cdir)
recs = append(recs, readTestWAL(t, w.Dir())...)
var series, samples, stones, metadata int
for _, rec := range recs {
switch rec.(type) {
case []record.RefSeries:
series++
case []record.RefSample:
samples++
case []tombstones.Stone:
stones++
case []record.RefMetadata:
metadata++
default:
require.Fail(t, "unknown record type")
}
}
require.Equal(t, 1, series)
require.Equal(t, 9999, samples)
require.Equal(t, 1, stones)
require.Equal(t, 0, metadata)
}
func TestHeadAppenderV2_Delete_e2e(t *testing.T) {
numDatapoints := 1000
numRanges := 1000
timeInterval := int64(2)
// Create 8 series with 1000 data points each over different time ranges, delete some ranges, and run queries.
lbls := [][]labels.Label{
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prom-k8s"},
},
}
seriesMap := map[string][]chunks.Sample{}
for _, l := range lbls {
seriesMap[labels.New(l...).String()] = []chunks.Sample{}
}
hb, _ := newTestHead(t, 100000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
app := hb.AppenderV2(context.Background())
for _, l := range lbls {
ls := labels.New(l...)
series := []chunks.Sample{}
ts := rand.Int63n(300)
for range numDatapoints {
v := rand.Float64()
_, err := app.Append(0, ls, 0, ts, v, nil, nil, storage.AOptions{})
require.NoError(t, err)
series = append(series, sample{ts, v, nil, nil})
ts += rand.Int63n(timeInterval) + 1
}
seriesMap[labels.New(l...).String()] = series
}
require.NoError(t, app.Commit())
// Delete a time range for each selector.
dels := []struct {
ms []*labels.Matcher
drange tombstones.Intervals
}{
{
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 600, Maxt: 670}},
},
{
ms: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
},
drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 100, Maxt: 670}},
},
{
ms: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
},
drange: tombstones.Intervals{{Mint: 300, Maxt: 400}, {Mint: 100, Maxt: 6700}},
},
// TODO: Add Regexp Matchers.
}
for _, del := range dels {
for _, r := range del.drange {
require.NoError(t, hb.Delete(context.Background(), r.Mint, r.Maxt, del.ms...))
}
matched := labels.Slice{}
for _, l := range lbls {
s := labels.Selector(del.ms)
ls := labels.New(l...)
if s.Matches(ls) {
matched = append(matched, ls)
}
}
sort.Sort(matched)
for range numRanges {
q, err := NewBlockQuerier(hb, 0, 100000)
require.NoError(t, err)
ss := q.Select(context.Background(), true, nil, del.ms...)
// Build the mockSeriesSet.
matchedSeries := make([]storage.Series, 0, len(matched))
for _, m := range matched {
smpls := seriesMap[m.String()]
smpls = deletedSamples(smpls, del.drange)
// Only append those series for which samples exist, as mockSeriesSet
// doesn't skip series with no samples.
// TODO: But sometimes SeriesSet returns an empty chunkenc.Iterator
if len(smpls) > 0 {
matchedSeries = append(matchedSeries, storage.NewListSeries(m, smpls))
}
}
expSs := newMockSeriesSet(matchedSeries)
// Compare both SeriesSets.
for {
eok, rok := expSs.Next(), ss.Next()
// Skip a series if iterator is empty.
if rok {
for ss.At().Iterator(nil).Next() == chunkenc.ValNone {
rok = ss.Next()
if !rok {
break
}
}
}
require.Equal(t, eok, rok)
if !eok {
break
}
sexp := expSs.At()
sres := ss.At()
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
require.NoError(t, ss.Err())
require.Empty(t, ss.Warnings())
require.NoError(t, q.Close())
}
}
}
func TestHeadAppenderV2_UncommittedSamplesNotLostOnTruncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
app := h.appenderV2()
lset := labels.FromStrings("a", "1")
_, err := app.Append(0, lset, 0, 2100, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, h.Truncate(2000))
require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
require.NoError(t, app.Commit())
q, err := NewBlockQuerier(h, 1500, 2500)
require.NoError(t, err)
defer q.Close()
ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
require.True(t, ss.Next())
for ss.Next() {
}
require.NoError(t, ss.Err())
require.Empty(t, ss.Warnings())
}
func TestHeadAppenderV2_TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
app := h.appenderV2()
lset := labels.FromStrings("a", "1")
_, err := app.Append(0, lset, 0, 2100, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, h.Truncate(2000))
require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
require.NoError(t, app.Rollback())
q, err := NewBlockQuerier(h, 1500, 2500)
require.NoError(t, err)
ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
require.False(t, ss.Next())
require.Empty(t, ss.Warnings())
require.NoError(t, q.Close())
// Truncate again; this time the series should be deleted.
require.NoError(t, h.Truncate(2050))
require.Equal(t, (*memSeries)(nil), h.series.getByHash(lset.Hash(), lset))
}
func TestHeadAppenderV2_LogRollback(t *testing.T) {
for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
h, w := newTestHead(t, 1000, compress, false)
defer func() {
require.NoError(t, h.Close())
}()
app := h.AppenderV2(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 1, 2, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Rollback())
recs := readTestWAL(t, w.Dir())
require.Len(t, recs, 1)
series, ok := recs[0].([]record.RefSeries)
require.True(t, ok, "expected series record but got %+v", recs[0])
require.Equal(t, []record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series)
})
}
}
func TestHeadAppenderV2_ReturnsSortedLabelValues(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
app := h.appenderV2()
for i := 100; i > 0; i-- {
for j := range 10 {
lset := labels.FromStrings(
"__name__", fmt.Sprintf("metric_%d", i),
"label", fmt.Sprintf("value_%d", j),
)
_, err := app.Append(0, lset, 0, 2100, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
}
q, err := NewBlockQuerier(h, 1500, 2500)
require.NoError(t, err)
res, _, err := q.LabelValues(context.Background(), "__name__", nil)
require.NoError(t, err)
require.True(t, slices.IsSorted(res))
require.NoError(t, q.Close())
}
func TestHeadAppenderV2_NewWalSegmentOnTruncate(t *testing.T) {
h, wal := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
add := func(ts int64) {
app := h.AppenderV2(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, ts, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
}
add(0)
_, last, err := wlog.Segments(wal.Dir())
require.NoError(t, err)
require.Equal(t, 0, last)
add(1)
require.NoError(t, h.Truncate(1))
_, last, err = wlog.Segments(wal.Dir())
require.NoError(t, err)
require.Equal(t, 1, last)
add(2)
require.NoError(t, h.Truncate(2))
_, last, err = wlog.Segments(wal.Dir())
require.NoError(t, err)
require.Equal(t, 2, last)
}
func TestHeadAppenderV2_Append_DuplicateLabelName(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
add := func(labels labels.Labels, labelName string) {
app := h.AppenderV2(context.Background())
_, err := app.Append(0, labels, 0, 0, 0, nil, nil, storage.AOptions{})
require.EqualError(t, err, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName))
}
add(labels.FromStrings("a", "c", "a", "b"), "a")
add(labels.FromStrings("a", "c", "a", "c"), "a")
add(labels.FromStrings("__name__", "up", "job", "prometheus", "le", "500", "le", "400", "unit", "s"), "le")
}
func TestHeadAppenderV2_MemSeriesIsolation(t *testing.T) {
if defaultIsolationDisabled {
t.Skip("skipping test since tsdb isolation is disabled")
}
// Put a series, select it. GC it and then access it.
lastValue := func(h *Head, maxAppendID uint64) int {
idx, err := h.Index()
require.NoError(t, err)
iso := h.iso.State(math.MinInt64, math.MaxInt64)
iso.maxAppendID = maxAppendID
chunks, err := h.chunksRange(math.MinInt64, math.MaxInt64, iso)
require.NoError(t, err)
// Hm.. here direct block chunk querier might be required?
querier := blockQuerier{
blockBaseQuerier: &blockBaseQuerier{
index: idx,
chunks: chunks,
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/compact.go | tsdb/compact.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"slices"
"time"
"github.com/oklog/ulid/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
// ExponentialBlockRanges returns a list of `steps` time ranges, starting at minSize and multiplying by stepSize at each step.
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
ranges := make([]int64, 0, steps)
curRange := minSize
for range steps {
ranges = append(ranges, curRange)
curRange *= int64(stepSize)
}
return ranges
}
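// A quick worked example (illustrative, not part of the original file):
// with minSize of 2h in milliseconds, 3 steps and a stepSize of 5, the
// resulting ladder is 2h, 10h and 50h.
func exampleExponentialBlockRanges() {
ranges := ExponentialBlockRanges(2*60*60*1000, 3, 5)
fmt.Println(ranges) // [7200000 36000000 180000000]
}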
// Compactor provides compaction against an underlying storage
// of time series data.
type Compactor interface {
// Plan returns a set of directories that can be compacted concurrently.
// The directories can be overlapping.
// Results returned when compactions are in progress are undefined.
Plan(dir string) ([]string, error)
// Write persists one or more Blocks into a directory.
// No Block is written when the resulting Block has 0 samples; an empty slice is returned instead.
// Prometheus always returns one block or none. The interface allows returning more than one
// block so downstream users can experiment with the compactor.
Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error)
// Compact runs compaction against the provided directories. Must
// only be called concurrently with results of Plan().
// Can optionally pass a list of already open blocks,
// to avoid having to reopen them.
// Prometheus always returns one block or none. The interface allows returning more than one
// block so downstream users can experiment with the compactor.
// When the resulting Block has 0 samples:
// * No block is written.
// * The source dirs are marked Deletable.
// * Block is not included in the result.
Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error)
}
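// A hedged usage sketch of the Compactor interface (not part of the original
// file): keep planning and compacting until no further work is suggested,
// roughly what the TSDB's own compaction loop does.
func compactUntilDoneSketch(c Compactor, dir string) error {
for {
dirs, err := c.Plan(dir)
if err != nil {
return err
}
if len(dirs) == 0 {
return nil // Nothing left to compact.
}
if _, err := c.Compact(dir, dirs, nil); err != nil {
return err
}
}
}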
// LeveledCompactor implements the Compactor interface.
type LeveledCompactor struct {
metrics *CompactorMetrics
logger *slog.Logger
ranges []int64
chunkPool chunkenc.Pool
ctx context.Context
maxBlockChunkSegmentSize int64
useUncachedIO bool
mergeFunc storage.VerticalChunkSeriesMergeFunc
blockExcludeFunc BlockExcludeFilterFunc
postingsEncoder index.PostingsEncoder
postingsDecoderFactory PostingsDecoderFactory
enableOverlappingCompaction bool
}
type CompactorMetrics struct {
Ran prometheus.Counter
PopulatingBlocks prometheus.Gauge
OverlappingBlocks prometheus.Counter
Duration prometheus.Histogram
ChunkSize prometheus.Histogram
ChunkSamples prometheus.Histogram
ChunkRange prometheus.Histogram
}
// NewCompactorMetrics initializes metrics for Compactor.
func NewCompactorMetrics(r prometheus.Registerer) *CompactorMetrics {
m := &CompactorMetrics{}
m.Ran = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_compactions_total",
Help: "Total number of compactions that were executed for the partition.",
})
m.PopulatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_compaction_populating_block",
Help: "Set to 1 when a block is currently being written to the disk.",
})
m.OverlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_vertical_compactions_total",
Help: "Total number of compactions done on overlapping blocks.",
})
m.Duration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "prometheus_tsdb_compaction_duration_seconds",
Help: "Duration of compaction runs",
Buckets: prometheus.ExponentialBuckets(1, 2, 14),
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
})
m.ChunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "prometheus_tsdb_compaction_chunk_size_bytes",
Help: "Final size of chunks on their first compaction",
Buckets: prometheus.ExponentialBuckets(32, 1.5, 12),
})
m.ChunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "prometheus_tsdb_compaction_chunk_samples",
Help: "Final number of samples on their first compaction",
Buckets: prometheus.ExponentialBuckets(4, 1.5, 12),
})
m.ChunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "prometheus_tsdb_compaction_chunk_range_seconds",
Help: "Final time range of chunks on their first compaction",
Buckets: prometheus.ExponentialBuckets(100, 4, 10),
})
if r != nil {
r.MustRegister(
m.Ran,
m.PopulatingBlocks,
m.OverlappingBlocks,
m.Duration,
m.ChunkRange,
m.ChunkSamples,
m.ChunkSize,
)
}
return m
}
type LeveledCompactorOptions struct {
// PE specifies the postings encoder. It is called when the compactor is writing out the postings for a label name/value pair during compaction.
// If it is nil then the default encoder is used. At the moment that is the "raw" encoder. See index.EncodePostingsRaw for more.
PE index.PostingsEncoder
// PD specifies the postings decoder factory, which returns a postings decoder based on the BlockMeta. It is called when opening a block or opening the index file.
// If it is nil then a default decoder is used, compatible with Prometheus v2.
PD PostingsDecoderFactory
// MaxBlockChunkSegmentSize is the max block chunk segment size. If it is 0 then the default chunks.DefaultChunkSegmentSize is used.
MaxBlockChunkSegmentSize int64
// MergeFunc is used for merging series together in vertical compaction. By default storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge) is used.
MergeFunc storage.VerticalChunkSeriesMergeFunc
// BlockExcludeFilter is used to decide which blocks are excluded from compactions.
BlockExcludeFilter BlockExcludeFilterFunc
// EnableOverlappingCompaction enables compaction of overlapping blocks. In Prometheus it is always enabled.
// It is useful for downstream projects like Mimir, Cortex, Thanos where they have a separate component that does compaction.
EnableOverlappingCompaction bool
// Metrics is the set of metrics for the Compactor. By default, NewCompactorMetrics is called to initialize the metrics unless they are provided.
Metrics *CompactorMetrics
// UseUncachedIO allows bypassing the page cache when appropriate.
UseUncachedIO bool
}
type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder
type BlockExcludeFilterFunc func(meta *BlockMeta) bool
func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder {
return index.DecodePostingsRaw
}
func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{
MergeFunc: mergeFunc,
EnableOverlappingCompaction: true,
})
}
func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) {
if len(ranges) == 0 {
return nil, errors.New("at least one range must be provided")
}
if pool == nil {
pool = chunkenc.NewPool()
}
if l == nil {
l = promslog.NewNopLogger()
}
mergeFunc := opts.MergeFunc
if mergeFunc == nil {
mergeFunc = storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)
}
maxBlockChunkSegmentSize := opts.MaxBlockChunkSegmentSize
if maxBlockChunkSegmentSize == 0 {
maxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize
}
pe := opts.PE
if pe == nil {
pe = index.EncodePostingsRaw
}
if opts.Metrics == nil {
opts.Metrics = NewCompactorMetrics(r)
}
return &LeveledCompactor{
ranges: ranges,
chunkPool: pool,
logger: l,
metrics: opts.Metrics,
ctx: ctx,
maxBlockChunkSegmentSize: maxBlockChunkSegmentSize,
useUncachedIO: opts.UseUncachedIO,
mergeFunc: mergeFunc,
postingsEncoder: pe,
postingsDecoderFactory: opts.PD,
enableOverlappingCompaction: opts.EnableOverlappingCompaction,
blockExcludeFunc: opts.BlockExcludeFilter,
}, nil
}
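// A minimal construction sketch (illustrative only): build a compactor with
// the exponential range ladder and overlapping compaction enabled, letting
// the nil arguments fall back to the defaults handled above.
func newCompactorSketch(ctx context.Context) (*LeveledCompactor, error) {
return NewLeveledCompactorWithOptions(ctx, nil, nil,
ExponentialBlockRanges(2*60*60*1000, 3, 5), nil,
LeveledCompactorOptions{EnableOverlappingCompaction: true})
}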
type dirMeta struct {
dir string
meta *BlockMeta
}
// Plan returns a list of compactable blocks in the provided directory.
func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
dirs, err := blockDirs(dir)
if err != nil {
return nil, err
}
if len(dirs) < 1 {
return nil, nil
}
var dms []dirMeta
for _, dir := range dirs {
meta, _, err := readMetaFile(dir)
if err != nil {
return nil, err
}
if c.blockExcludeFunc != nil && c.blockExcludeFunc(meta) {
break
}
dms = append(dms, dirMeta{dir, meta})
}
return c.plan(dms)
}
func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
if len(dms) == 0 {
return nil, nil
}
slices.SortFunc(dms, func(a, b dirMeta) int {
switch {
case a.meta.MinTime < b.meta.MinTime:
return -1
case a.meta.MinTime > b.meta.MinTime:
return 1
default:
return 0
}
})
res := c.selectOverlappingDirs(dms)
if len(res) > 0 {
return res, nil
}
// No overlapping blocks, do compaction the usual way.
// We do not include the most recently created block (the one with max(minTime)), i.e. the block which was just created from the WAL.
// This gives users a window of a full block size to back up new data piece-wise without having to care about data overlap.
dms = dms[:len(dms)-1]
for _, dm := range c.selectDirs(dms) {
res = append(res, dm.dir)
}
if len(res) > 0 {
return res, nil
}
// Compact any blocks with a big enough time range that have >5% tombstones.
for i := len(dms) - 1; i >= 0; i-- {
meta := dms[i].meta
if meta.MaxTime-meta.MinTime < c.ranges[len(c.ranges)/2] {
// If the block is entirely deleted, then we don't care about the block being big enough.
// TODO: This is assuming a single tombstone is for a distinct series, which might not be true.
if meta.Stats.NumTombstones > 0 && meta.Stats.NumTombstones >= meta.Stats.NumSeries {
return []string{dms[i].dir}, nil
}
break
}
if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 {
return []string{dms[i].dir}, nil
}
}
return nil, nil
}
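// Worked example for the >5% tombstone rule above (illustrative only): a block
// with 1000 series and 51 tombstones yields 51/(1000+1) ≈ 0.0509 > 0.05, so its
// dir is returned for compaction, while 50/(1000+1) ≈ 0.0500 falls below the
// strict threshold and is not.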
// selectDirs returns the dir metas that should be compacted into a single new block.
// If only a single block range is configured, the result is always nil.
func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta {
if len(c.ranges) < 2 || len(ds) < 1 {
return nil
}
highTime := ds[len(ds)-1].meta.MinTime
for _, iv := range c.ranges[1:] {
parts := splitByRange(ds, iv)
if len(parts) == 0 {
continue
}
Outer:
for _, p := range parts {
// Do not select the range if it has a block whose compaction failed.
for _, dm := range p {
if dm.meta.Compaction.Failed {
continue Outer
}
}
mint := p[0].meta.MinTime
maxt := p[len(p)-1].meta.MaxTime
// Pick the range of blocks if it spans the full range (potentially with gaps)
// or is before the most recent block.
// This ensures we don't compact blocks prematurely when another one of the same
// size still fits in the range.
if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 {
return p
}
}
}
return nil
}
// selectOverlappingDirs returns all dirs with overlapping time ranges.
// It expects sorted input by mint and returns the overlapping dirs in the same order as received.
func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string {
if !c.enableOverlappingCompaction {
return nil
}
if len(ds) < 2 {
return nil
}
var overlappingDirs []string
globalMaxt := ds[0].meta.MaxTime
for i, d := range ds[1:] {
if d.meta.MinTime < globalMaxt {
if len(overlappingDirs) == 0 { // When it is the first overlap, need to add the last one as well.
overlappingDirs = append(overlappingDirs, ds[i].dir)
}
overlappingDirs = append(overlappingDirs, d.dir)
} else if len(overlappingDirs) > 0 {
break
}
if d.meta.MaxTime > globalMaxt {
globalMaxt = d.meta.MaxTime
}
}
return overlappingDirs
}
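// A small sketch of selectOverlappingDirs (illustrative only): with sorted
// blocks [0-20], [10-30], [40-50], the first two overlap and are returned,
// assuming overlapping compaction is enabled on the compactor.
func exampleSelectOverlappingDirs(c *LeveledCompactor) {
mk := func(dir string, mint, maxt int64) dirMeta {
return dirMeta{dir: dir, meta: &BlockMeta{MinTime: mint, MaxTime: maxt}}
}
overlapping := c.selectOverlappingDirs([]dirMeta{mk("a", 0, 20), mk("b", 10, 30), mk("c", 40, 50)})
fmt.Println(overlapping) // [a b]
}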
// splitByRange splits the directories by the time range. The range sequence starts at 0.
//
// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30
// it returns [0-10, 10-20], [50-60], [90-100].
func splitByRange(ds []dirMeta, tr int64) [][]dirMeta {
var splitDirs [][]dirMeta
for i := 0; i < len(ds); {
var (
group []dirMeta
t0 int64
m = ds[i].meta
)
// Compute start of aligned time range of size tr closest to the current block's start.
if m.MinTime >= 0 {
t0 = tr * (m.MinTime / tr)
} else {
t0 = tr * ((m.MinTime - tr + 1) / tr)
}
// Skip blocks that don't fall into the range. This can happen through misalignment, or
// because the block spans a multiple of the intended range.
if m.MaxTime > t0+tr {
i++
continue
}
// Add all dirs to the current group that are within [t0, t0+tr].
for ; i < len(ds); i++ {
// Either the block falls into the next range or doesn't fit at all (checked above).
if ds[i].meta.MaxTime > t0+tr {
break
}
group = append(group, ds[i])
}
if len(group) > 0 {
splitDirs = append(splitDirs, group)
}
}
return splitDirs
}
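// A sketch of the doc example above (illustrative only): splitting blocks
// [0-10, 10-20, 50-60, 90-100] with tr=30 produces three groups.
func exampleSplitByRange() {
mk := func(mint, maxt int64) dirMeta {
return dirMeta{meta: &BlockMeta{MinTime: mint, MaxTime: maxt}}
}
parts := splitByRange([]dirMeta{mk(0, 10), mk(10, 20), mk(50, 60), mk(90, 100)}, 30)
fmt.Println(len(parts)) // 3 groups: [0-10 10-20], [50-60], [90-100]
}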
// CompactBlockMetas merges many block metas into one, combining their source blocks
// and adjusting the compaction level. The min/max time of the resulting block meta covers all input blocks.
func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
res := &BlockMeta{
ULID: uid,
}
sources := map[ulid.ULID]struct{}{}
mint := blocks[0].MinTime
maxt := blocks[0].MaxTime
for _, b := range blocks {
if b.MinTime < mint {
mint = b.MinTime
}
if b.MaxTime > maxt {
maxt = b.MaxTime
}
if b.Compaction.Level > res.Compaction.Level {
res.Compaction.Level = b.Compaction.Level
}
for _, s := range b.Compaction.Sources {
sources[s] = struct{}{}
}
res.Compaction.Parents = append(res.Compaction.Parents, BlockDesc{
ULID: b.ULID,
MinTime: b.MinTime,
MaxTime: b.MaxTime,
})
}
res.Compaction.Level++
for s := range sources {
res.Compaction.Sources = append(res.Compaction.Sources, s)
}
slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) int {
return a.Compare(b)
})
res.MinTime = mint
res.MaxTime = maxt
return res
}
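// A minimal sketch of CompactBlockMetas (illustrative only): merging two
// level-1 blocks yields a level-2 meta whose time range covers both inputs.
func exampleCompactBlockMetas() {
a := &BlockMeta{MinTime: 0, MaxTime: 100}
a.Compaction.Level = 1
b := &BlockMeta{MinTime: 50, MaxTime: 200}
b.Compaction.Level = 1
res := CompactBlockMetas(ulid.MustNew(ulid.Now(), rand.Reader), a, b)
fmt.Println(res.MinTime, res.MaxTime, res.Compaction.Level) // 0 200 2
}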
// Compact creates a new block in the compactor's directory from the blocks in the
// provided directories.
func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) {
return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{})
}
func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) ([]ulid.ULID, error) {
var (
blocks []BlockReader
bs []*Block
metas []*BlockMeta
uids []string
)
start := time.Now()
for _, d := range dirs {
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
meta, _, err := readMetaFile(d)
if err != nil {
return nil, err
}
var b *Block
// Use already open blocks if we can, to avoid
// having the index data in memory twice.
for _, o := range open {
if meta.ULID == o.Meta().ULID {
b = o
break
}
}
if b == nil {
var err error
b, err = OpenBlock(c.logger, d, c.chunkPool, c.postingsDecoderFactory)
if err != nil {
return nil, err
}
defer b.Close()
}
metas = append(metas, meta)
blocks = append(blocks, b)
bs = append(bs, b)
uids = append(uids, meta.ULID.String())
}
uid := ulid.MustNew(ulid.Now(), rand.Reader)
meta := CompactBlockMetas(uid, metas...)
err := c.write(dest, meta, blockPopulator, blocks...)
if err == nil {
if meta.Stats.NumSamples == 0 {
for _, b := range bs {
b.meta.Compaction.Deletable = true
n, err := writeMetaFile(c.logger, b.dir, &b.meta)
if err != nil {
c.logger.Error(
"Failed to write 'Deletable' to meta file after compaction",
"ulid", b.meta.ULID,
)
}
b.numBytesMeta = n
}
c.logger.Info(
"compact blocks resulted in empty block",
"count", len(blocks),
"sources", fmt.Sprintf("%v", uids),
"duration", time.Since(start),
)
return nil, nil
}
c.logger.Info(
"compact blocks",
"count", len(blocks),
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
"sources", fmt.Sprintf("%v", uids),
"duration", time.Since(start),
)
return []ulid.ULID{uid}, nil
}
errs := tsdb_errors.NewMulti(err)
if !errors.Is(err, context.Canceled) {
for _, b := range bs {
if err := b.setCompactionFailed(); err != nil {
errs.Add(fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err))
}
}
}
return nil, errs.Err()
}
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) {
start := time.Now()
uid := ulid.MustNew(ulid.Now(), rand.Reader)
c.logger.Info("write block started", "mint", mint, "maxt", maxt, "ulid", uid)
meta := &BlockMeta{
ULID: uid,
MinTime: mint,
MaxTime: maxt,
}
meta.Compaction.Level = 1
meta.Compaction.Sources = []ulid.ULID{uid}
if base != nil {
meta.Compaction.Parents = []BlockDesc{
{ULID: base.ULID, MinTime: base.MinTime, MaxTime: base.MaxTime},
}
if base.Compaction.FromOutOfOrder() {
meta.Compaction.SetOutOfOrder()
}
}
err := c.write(dest, meta, DefaultBlockPopulator{}, b)
if err != nil {
return nil, err
}
if meta.Stats.NumSamples == 0 {
c.logger.Info(
"write block resulted in empty block",
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"duration", time.Since(start),
)
return nil, nil
}
c.logger.Info(
"write block completed",
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
"duration", time.Since(start),
"ooo", meta.Compaction.FromOutOfOrder(),
)
return []ulid.ULID{uid}, nil
}
// instrumentedChunkWriter is used for level 1 compactions to record statistics
// about compacted chunks.
type instrumentedChunkWriter struct {
ChunkWriter
size prometheus.Histogram
samples prometheus.Histogram
trange prometheus.Histogram
}
func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
for _, c := range chunks {
w.size.Observe(float64(len(c.Chunk.Bytes())))
w.samples.Observe(float64(c.Chunk.NumSamples()))
w.trange.Observe(float64(c.MaxTime - c.MinTime))
}
return w.ChunkWriter.WriteChunks(chunks...)
}
// write creates a new block under dest that is the union of the provided blocks.
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator BlockPopulator, blocks ...BlockReader) (err error) {
dir := filepath.Join(dest, meta.ULID.String())
tmp := dir + tmpForCreationBlockDirSuffix
var closers []io.Closer
defer func(t time.Time) {
err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err()
// RemoveAll returns no error when tmp doesn't exist so it is safe to always run it.
if err := os.RemoveAll(tmp); err != nil {
c.logger.Error("removed tmp folder after failed compaction", "err", err.Error())
}
c.metrics.Ran.Inc()
c.metrics.Duration.Observe(time.Since(t).Seconds())
}(time.Now())
if err = os.RemoveAll(tmp); err != nil {
return err
}
if err = os.MkdirAll(tmp, 0o777); err != nil {
return err
}
// Populate the chunk and index files in the temporary directory with
// the data of all blocks.
var chunkw ChunkWriter
chunkw, err = chunks.NewWriter(chunkDir(tmp), chunks.WithSegmentSize(c.maxBlockChunkSegmentSize), chunks.WithUncachedIO(c.useUncachedIO))
if err != nil {
return fmt.Errorf("open chunk writer: %w", err)
}
closers = append(closers, chunkw)
// Record written chunk sizes on level 1 compactions.
if meta.Compaction.Level == 1 {
chunkw = &instrumentedChunkWriter{
ChunkWriter: chunkw,
size: c.metrics.ChunkSize,
samples: c.metrics.ChunkSamples,
trange: c.metrics.ChunkRange,
}
}
indexw, err := index.NewWriterWithEncoder(c.ctx, filepath.Join(tmp, indexFilename), c.postingsEncoder)
if err != nil {
return fmt.Errorf("open index writer: %w", err)
}
closers = append(closers, indexw)
if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw, AllSortedPostings); err != nil {
return fmt.Errorf("populate block: %w", err)
}
select {
case <-c.ctx.Done():
return c.ctx.Err()
default:
}
// We close the writers explicitly here to check for errors, even
// though they are also covered by the defer. This is because on Windows
// you cannot delete these files unless they are closed, and the defer
// makes sure they are closed if the function exits due to an error above.
errs := tsdb_errors.NewMulti()
for _, w := range closers {
errs.Add(w.Close())
}
closers = closers[:0] // Avoid closing the writers twice in the defer.
if errs.Err() != nil {
return errs.Err()
}
// Populated block is empty, so exit early.
if meta.Stats.NumSamples == 0 {
return nil
}
if _, err = writeMetaFile(c.logger, tmp, meta); err != nil {
return fmt.Errorf("write merged meta: %w", err)
}
// Create an empty tombstones file.
if _, err := tombstones.WriteFile(c.logger, tmp, tombstones.NewMemTombstones()); err != nil {
return fmt.Errorf("write new tombstones file: %w", err)
}
df, err := fileutil.OpenDir(tmp)
if err != nil {
return fmt.Errorf("open temporary block dir: %w", err)
}
defer func() {
if df != nil {
df.Close()
}
}()
if err := df.Sync(); err != nil {
return fmt.Errorf("sync temporary dir file: %w", err)
}
// Close the temp dir before renaming the block dir (required on Windows).
if err = df.Close(); err != nil {
return fmt.Errorf("close temporary dir: %w", err)
}
df = nil
// Block successfully written; make it visible in the destination dir by moving it from the tmp one.
if err := fileutil.Replace(tmp, dir); err != nil {
return fmt.Errorf("rename block dir: %w", err)
}
return nil
}
type BlockPopulator interface {
PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error
}
// IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader.
type IndexReaderPostingsFunc func(ctx context.Context, reader IndexReader) index.Postings
// AllSortedPostings returns a sorted iterator over all postings from the input index reader.
func AllSortedPostings(ctx context.Context, reader IndexReader) index.Postings {
k, v := index.AllPostingsKey()
all, err := reader.Postings(ctx, k, v)
if err != nil {
return index.ErrPostings(err)
}
return reader.SortedPostings(all)
}
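// Note (added for clarity): Compact and Write pass AllSortedPostings into
// PopulateBlock below, so compaction always iterates series in sorted label
// order, which the symbol and series merging in PopulateBlock relies on.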
type DefaultBlockPopulator struct{}
// PopulateBlock fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It returns meta information for the new block.
// It expects the input blocks to be sorted by mint.
func (DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) {
if len(blocks) == 0 {
return errors.New("cannot populate block from no readers")
}
var (
sets []storage.ChunkSeriesSet
symbols index.StringIter
closers []io.Closer
overlapping bool
)
defer func() {
errs := tsdb_errors.NewMulti(err)
if cerr := tsdb_errors.CloseAll(closers); cerr != nil {
errs.Add(fmt.Errorf("close: %w", cerr))
}
err = errs.Err()
metrics.PopulatingBlocks.Set(0)
}()
metrics.PopulatingBlocks.Set(1)
globalMaxt := blocks[0].Meta().MaxTime
for i, b := range blocks {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if !overlapping {
if i > 0 && b.Meta().MinTime < globalMaxt {
metrics.OverlappingBlocks.Inc()
overlapping = true
logger.Info("Found overlapping blocks during compaction", "ulid", meta.ULID)
}
if b.Meta().MaxTime > globalMaxt {
globalMaxt = b.Meta().MaxTime
}
}
indexr, err := b.Index()
if err != nil {
return fmt.Errorf("open index reader for block %+v: %w", b.Meta(), err)
}
closers = append(closers, indexr)
chunkr, err := b.Chunks()
if err != nil {
return fmt.Errorf("open chunk reader for block %+v: %w", b.Meta(), err)
}
closers = append(closers, chunkr)
tombsr, err := b.Tombstones()
if err != nil {
return fmt.Errorf("open tombstone reader for block %+v: %w", b.Meta(), err)
}
closers = append(closers, tombsr)
postings := postingsFunc(ctx, indexr)
// Block metas are half open: [min, max), so subtract 1 to ensure we don't keep samples with a timestamp exactly equal to meta.MaxTime.
sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, postings, meta.MinTime, meta.MaxTime-1, false))
syms := indexr.Symbols()
if i == 0 {
symbols = syms
continue
}
symbols = NewMergedStringIter(symbols, syms)
}
for symbols.Next() {
if err := indexw.AddSymbol(symbols.At()); err != nil {
return fmt.Errorf("add symbol: %w", err)
}
}
if err := symbols.Err(); err != nil {
return fmt.Errorf("next symbol: %w", err)
}
var (
ref = storage.SeriesRef(0)
chks []chunks.Meta
chksIter chunks.Iterator
)
set := sets[0]
if len(sets) > 1 {
// Merge series using specified chunk series merger.
// The default one is the compacting series merger.
set = storage.NewMergeChunkSeriesSet(sets, 0, mergeFunc)
}
// Iterate over all sorted chunk series.
for set.Next() {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
s := set.At()
chksIter = s.Iterator(chksIter)
chks = chks[:0]
for chksIter.Next() {
// We are not iterating over the chunks in a streaming way, as
// it's more efficient to do a bulk write for the index and
// chunk files.
chks = append(chks, chksIter.At())
}
if err := chksIter.Err(); err != nil {
return fmt.Errorf("chunk iter: %w", err)
}
// Skip series with all deleted chunks.
if len(chks) == 0 {
continue
}
if err := chunkw.WriteChunks(chks...); err != nil {
return fmt.Errorf("write chunks: %w", err)
}
if err := indexw.AddSeries(ref, s.Labels(), chks...); err != nil {
return fmt.Errorf("add series: %w", err)
}
meta.Stats.NumChunks += uint64(len(chks))
meta.Stats.NumSeries++
for _, chk := range chks {
samples := uint64(chk.Chunk.NumSamples())
meta.Stats.NumSamples += samples
switch chk.Chunk.Encoding() {
case chunkenc.EncHistogram, chunkenc.EncFloatHistogram:
meta.Stats.NumHistogramSamples += samples
case chunkenc.EncXOR:
meta.Stats.NumFloatSamples += samples
}
}
for _, chk := range chks {
if err := chunkPool.Put(chk.Chunk); err != nil {
return fmt.Errorf("put chunk: %w", err)
}
}
ref++
}
if err := set.Err(); err != nil {
return fmt.Errorf("iterate compaction set: %w", err)
}
return nil
}
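// A hedged end-to-end sketch (illustrative only): persist a BlockReader
// covering [mint, maxt] into dest using a freshly built compactor.
func writeBlockSketch(ctx context.Context, dest string, b BlockReader, mint, maxt int64) ([]ulid.ULID, error) {
c, err := NewLeveledCompactor(ctx, nil, nil, ExponentialBlockRanges(2*60*60*1000, 3, 5), nil, nil)
if err != nil {
return nil, err
}
// Write returns no ULIDs when the resulting block would be empty.
return c.Write(dest, b, mint, maxt, nil)
}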
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_test.go | tsdb/head_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"fmt"
"io"
"math"
"math/rand"
"os"
"path"
"path/filepath"
"reflect"
"slices"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
)
// newTestHeadDefaultOptions returns the HeadOptions that should be used by default in unit tests.
func newTestHeadDefaultOptions(chunkRange int64, oooEnabled bool) *HeadOptions {
opts := DefaultHeadOptions()
opts.ChunkRange = chunkRange
opts.EnableExemplarStorage = true
opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)
if oooEnabled {
opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
}
return opts
}
func newTestHead(t testing.TB, chunkRange int64, compressWAL compression.Type, oooEnabled bool) (*Head, *wlog.WL) {
return newTestHeadWithOptions(t, compressWAL, newTestHeadDefaultOptions(chunkRange, oooEnabled))
}
func newTestHeadWithOptions(t testing.TB, compressWAL compression.Type, opts *HeadOptions) (*Head, *wlog.WL) {
dir := t.TempDir()
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL)
require.NoError(t, err)
// Override the chunks dir with the testing one.
opts.ChunkDirRoot = dir
h, err := NewHead(nil, nil, wal, nil, opts, nil)
require.NoError(t, err)
require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(chunks.HeadSeriesRef, chunks.ChunkDiskMapperRef, int64, int64, uint16, chunkenc.Encoding, bool) error {
return nil
}))
return h, wal
}
func BenchmarkCreateSeries(b *testing.B) {
series := genSeries(b.N, 10, 0, 0)
h, _ := newTestHead(b, 10000, compression.None, false)
b.Cleanup(func() {
require.NoError(b, h.Close())
})
b.ReportAllocs()
b.ResetTimer()
for _, s := range series {
h.getOrCreate(s.Labels().Hash(), s.Labels(), false)
}
}
func populateTestWL(t testing.TB, w *wlog.WL, recs []any, buf []byte) []byte {
var enc record.Encoder
for _, r := range recs {
buf = buf[:0]
switch v := r.(type) {
case []record.RefSeries:
buf = enc.Series(v, buf)
case []record.RefSample:
buf = enc.Samples(v, buf)
case []tombstones.Stone:
buf = enc.Tombstones(v, buf)
case []record.RefExemplar:
buf = enc.Exemplars(v, buf)
case []record.RefHistogramSample:
buf, _ = enc.HistogramSamples(v, buf)
case []record.RefFloatHistogramSample:
buf, _ = enc.FloatHistogramSamples(v, buf)
case []record.RefMmapMarker:
buf = enc.MmapMarkers(v, buf)
case []record.RefMetadata:
buf = enc.Metadata(v, buf)
default:
continue
}
require.NoError(t, w.Log(buf))
}
return buf
}
func readTestWAL(t testing.TB, dir string) (recs []any) {
sr, err := wlog.NewSegmentsReader(dir)
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
r := wlog.NewReader(sr)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
series, err := dec.Series(rec, nil)
require.NoError(t, err)
recs = append(recs, series)
case record.Samples:
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
recs = append(recs, samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
samples, err := dec.HistogramSamples(rec, nil)
require.NoError(t, err)
recs = append(recs, samples)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
samples, err := dec.FloatHistogramSamples(rec, nil)
require.NoError(t, err)
recs = append(recs, samples)
case record.Tombstones:
tstones, err := dec.Tombstones(rec, nil)
require.NoError(t, err)
recs = append(recs, tstones)
case record.Metadata:
meta, err := dec.Metadata(rec, nil)
require.NoError(t, err)
recs = append(recs, meta)
case record.Exemplars:
exemplars, err := dec.Exemplars(rec, nil)
require.NoError(t, err)
recs = append(recs, exemplars)
default:
require.Fail(t, "unknown record type")
}
}
require.NoError(t, r.Err())
return recs
}
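// A minimal roundtrip sketch (illustrative only) tying the two helpers above
// together: write a series record with populateTestWL and read it back with
// readTestWAL.
func exampleWALRoundtrip(t *testing.T) {
dir := t.TempDir()
w, err := wlog.New(nil, nil, dir, compression.None)
require.NoError(t, err)
series := []record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}
populateTestWL(t, w, []any{series}, nil)
require.NoError(t, w.Close())
recs := readTestWAL(t, dir)
require.Len(t, recs, 1)
got, ok := recs[0].([]record.RefSeries)
require.True(t, ok)
require.Equal(t, series, got)
}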
func BenchmarkLoadWLs(b *testing.B) {
cases := []struct {
// Total series is (batches*seriesPerBatch).
batches int
seriesPerBatch int
samplesPerSeries int
mmappedChunkT int64
// The first oooSeriesPct*seriesPerBatch series in a batch are selected as "OOO" series.
oooSeriesPct float64
// The first oooSamplesPct*samplesPerSeries samples in an OOO series are written as OOO samples.
oooSamplesPct float64
oooCapMax int64
}{
{ // Less series and more samples. 2 hour WAL with 1 second scrape interval.
batches: 10,
seriesPerBatch: 100,
samplesPerSeries: 7200,
},
{ // More series and less samples.
batches: 10,
seriesPerBatch: 10000,
samplesPerSeries: 50,
},
{ // In between.
batches: 10,
seriesPerBatch: 1000,
samplesPerSeries: 480,
},
{ // 2 hour WAL with 15 second scrape interval, and mmapped chunks up to last 100 samples.
batches: 100,
seriesPerBatch: 1000,
samplesPerSeries: 480,
mmappedChunkT: 3800,
},
{ // A lot of OOO samples (50% series with 50% of samples being OOO).
batches: 10,
seriesPerBatch: 1000,
samplesPerSeries: 480,
oooSeriesPct: 0.5,
oooSamplesPct: 0.5,
oooCapMax: DefaultOutOfOrderCapMax,
},
{ // Fewer OOO samples (10% of series with 10% of samples being OOO).
batches: 10,
seriesPerBatch: 1000,
samplesPerSeries: 480,
oooSeriesPct: 0.1,
oooSamplesPct: 0.1,
},
{ // 2 hour WAL with 15 second scrape interval, and mmapped chunks up to last 100 samples.
// Four mmap markers per OOO series: 480 * 0.3 = 144 OOO samples, and floor(144 / 32) = 4 full chunks (32 is DefaultOutOfOrderCapMax), hence four markers.
batches: 100,
seriesPerBatch: 1000,
samplesPerSeries: 480,
mmappedChunkT: 3800,
oooSeriesPct: 0.2,
oooSamplesPct: 0.3,
oooCapMax: DefaultOutOfOrderCapMax,
},
}
labelsPerSeries := 5
// Rough estimates of the most common percentages of samples that have an exemplar for each scrape.
exemplarsPercentages := []float64{0, 0.5, 1, 5}
lastExemplarsPerSeries := -1
for _, c := range cases {
missingSeriesPercentages := []float64{0, 0.1}
for _, missingSeriesPct := range missingSeriesPercentages {
for _, p := range exemplarsPercentages {
exemplarsPerSeries := int(math.RoundToEven(float64(c.samplesPerSeries) * p / 100))
// For tests with low samplesPerSeries we could end up testing with 0 exemplarsPerSeries
// multiple times without this check.
if exemplarsPerSeries == lastExemplarsPerSeries {
continue
}
lastExemplarsPerSeries = exemplarsPerSeries
b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d,oooSeriesPct=%.3f,oooSamplesPct=%.3f,oooCapMax=%d,missingSeriesPct=%.3f", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT, c.oooSeriesPct, c.oooSamplesPct, c.oooCapMax, missingSeriesPct),
func(b *testing.B) {
dir := b.TempDir()
wal, err := wlog.New(nil, nil, dir, compression.None)
require.NoError(b, err)
var wbl *wlog.WL
if c.oooSeriesPct != 0 {
wbl, err = wlog.New(nil, nil, dir, compression.None)
require.NoError(b, err)
}
// Write series.
refSeries := make([]record.RefSeries, 0, c.seriesPerBatch)
var buf []byte
builder := labels.NewBuilder(labels.EmptyLabels())
for j := 1; j < labelsPerSeries; j++ {
builder.Set(defaultLabelName+strconv.Itoa(j), defaultLabelValue+strconv.Itoa(j))
}
for k := 0; k < c.batches; k++ {
refSeries = refSeries[:0]
for i := k * c.seriesPerBatch; i < (k+1)*c.seriesPerBatch; i++ {
builder.Set(defaultLabelName, strconv.Itoa(i))
refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: builder.Labels()})
}
writeSeries := refSeries
if missingSeriesPct > 0 {
newWriteSeries := make([]record.RefSeries, 0, int(float64(len(refSeries))*(1.0-missingSeriesPct)))
keepRatio := 1.0 - missingSeriesPct
// Keep approximately a keepRatio fraction of the series.
for i, s := range refSeries {
if int(float64(i)*keepRatio) != int(float64(i+1)*keepRatio) {
newWriteSeries = append(newWriteSeries, s)
}
}
writeSeries = newWriteSeries
}
buf = populateTestWL(b, wal, []any{writeSeries}, buf)
}
// Write samples.
refSamples := make([]record.RefSample, 0, c.seriesPerBatch)
oooSeriesPerBatch := int(float64(c.seriesPerBatch) * c.oooSeriesPct)
oooSamplesPerSeries := int(float64(c.samplesPerSeries) * c.oooSamplesPct)
for i := 0; i < c.samplesPerSeries; i++ {
for j := 0; j < c.batches; j++ {
refSamples = refSamples[:0]
k := j * c.seriesPerBatch
// Skip appending the first oooSamplesPerSeries samples for the series in the batch that
// should have OOO samples. OOO samples are appended after all the in-order samples.
if i < oooSamplesPerSeries {
k += oooSeriesPerBatch
}
for ; k < (j+1)*c.seriesPerBatch; k++ {
refSamples = append(refSamples, record.RefSample{
Ref: chunks.HeadSeriesRef(k) * 101,
T: int64(i) * 10,
V: float64(i) * 100,
})
}
buf = populateTestWL(b, wal, []any{refSamples}, buf)
}
}
// Write mmapped chunks.
if c.mmappedChunkT != 0 {
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
require.NoError(b, err)
cOpts := chunkOpts{
chunkDiskMapper: chunkDiskMapper,
chunkRange: c.mmappedChunkT,
samplesPerChunk: DefaultSamplesPerChunk,
}
for k := 0; k < c.batches*c.seriesPerBatch; k++ {
// Create one mmapped chunk per series, with one sample at the given time.
s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, 0, defaultIsolationDisabled, false)
s.append(c.mmappedChunkT, 42, 0, cOpts)
// There's only one head chunk because only a single sample is appended. mmapChunks()
// ignores the latest chunk, so we need to cut a new head chunk to guarantee the chunk with
// the sample at c.mmappedChunkT is mmapped.
s.cutNewHeadChunk(c.mmappedChunkT, chunkenc.EncXOR, c.mmappedChunkT)
s.mmapChunks(chunkDiskMapper)
}
require.NoError(b, chunkDiskMapper.Close())
}
// Write exemplars.
refExemplars := make([]record.RefExemplar, 0, c.seriesPerBatch)
for i := range exemplarsPerSeries {
for j := 0; j < c.batches; j++ {
refExemplars = refExemplars[:0]
for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ {
refExemplars = append(refExemplars, record.RefExemplar{
Ref: chunks.HeadSeriesRef(k) * 101,
T: int64(i) * 10,
V: float64(i) * 100,
Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)),
})
}
buf = populateTestWL(b, wal, []any{refExemplars}, buf)
}
}
// Write OOO samples and mmap markers.
refMarkers := make([]record.RefMmapMarker, 0, oooSeriesPerBatch)
refSamples = make([]record.RefSample, 0, oooSeriesPerBatch)
for i := range oooSamplesPerSeries {
shouldAddMarkers := c.oooCapMax != 0 && i != 0 && int64(i)%c.oooCapMax == 0
for j := 0; j < c.batches; j++ {
refSamples = refSamples[:0]
if shouldAddMarkers {
refMarkers = refMarkers[:0]
}
for k := j * c.seriesPerBatch; k < (j*c.seriesPerBatch)+oooSeriesPerBatch; k++ {
ref := chunks.HeadSeriesRef(k) * 101
if shouldAddMarkers {
// loadWBL() checks that the marker's MmapRef is less than or equal to the ref
// for the last mmap chunk. Setting MmapRef to 0 to always pass that check.
refMarkers = append(refMarkers, record.RefMmapMarker{Ref: ref, MmapRef: 0})
}
refSamples = append(refSamples, record.RefSample{
Ref: ref,
T: int64(i) * 10,
V: float64(i) * 100,
})
}
if shouldAddMarkers {
populateTestWL(b, wbl, []any{refMarkers}, buf)
}
buf = populateTestWL(b, wal, []any{refSamples}, buf)
buf = populateTestWL(b, wbl, []any{refSamples}, buf)
}
}
b.ResetTimer()
// Load the WAL.
for b.Loop() {
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = dir
if c.oooCapMax > 0 {
opts.OutOfOrderCapMax.Store(c.oooCapMax)
}
h, err := NewHead(nil, nil, wal, wbl, opts, nil)
require.NoError(b, err)
h.Init(0)
}
b.StopTimer()
wal.Close()
if wbl != nil {
wbl.Close()
}
})
}
}
}
}
// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set.
// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located.
//
// Using an absolute path for BENCHMARK_LOAD_REAL_WLS_DIR is recommended.
//
// Because loading the WLs may alter BENCHMARK_LOAD_REAL_WLS_DIR, which could affect benchmark results,
// a copy of BENCHMARK_LOAD_REAL_WLS_DIR is made for each iteration and deleted at the end to ensure consistency.
// Make sure there is sufficient disk space for that.
func BenchmarkLoadRealWLs(b *testing.B) {
srcDir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR")
if srcDir == "" {
b.SkipNow()
}
// Load the WAL.
for b.Loop() {
b.StopTimer()
dir := b.TempDir()
require.NoError(b, fileutil.CopyDirs(srcDir, dir))
wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), compression.None)
require.NoError(b, err)
b.Cleanup(func() { wal.Close() })
wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), compression.None)
require.NoError(b, err)
b.Cleanup(func() { wbl.Close() })
b.StartTimer()
opts := DefaultHeadOptions()
opts.ChunkDirRoot = dir
h, err := NewHead(nil, nil, wal, wbl, opts, nil)
require.NoError(b, err)
require.NoError(b, h.Init(0))
b.StopTimer()
require.NoError(b, os.RemoveAll(dir))
}
}
// TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples;
// in total it generates 4000 chunks, because a 2h block at a 15s step holds 7200/15 = 480 samples per series,
// i.e. 4 chunks of DefaultSamplesPerChunk (120) samples each per series.
// While appending the samples to the head it concurrently queries them from multiple goroutines and verifies that the
// returned results are correct.
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
seriesCnt := 1000
readConcurrency := 2
writeConcurrency := 10
startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
qryRange := uint64(5 * time.Minute.Milliseconds())
step := uint64(15 * time.Second / time.Millisecond)
endTs := startTs + uint64(DefaultBlockDuration)
labelSets := make([]labels.Labels, seriesCnt)
for i := range seriesCnt {
labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
}
head.Init(0)
g, ctx := errgroup.WithContext(context.Background())
whileNotCanceled := func(f func() (bool, error)) error {
for ctx.Err() == nil {
cont, err := f()
if err != nil {
return err
}
if !cont {
return nil
}
}
return nil
}
	// Create one channel for each write worker. The channels will be used by the coordinator
	// goroutine to tell each write worker which timestamps it has to write.
writerTsCh := make([]chan uint64, writeConcurrency)
for writerTsChIdx := range writerTsCh {
writerTsCh[writerTsChIdx] = make(chan uint64)
}
	// workerReadyWg is used to synchronize the start of the test;
	// we only start the test once all workers signal that they're ready.
var workerReadyWg sync.WaitGroup
workerReadyWg.Add(writeConcurrency + readConcurrency)
// Start the write workers.
for wid := range writeConcurrency {
		// Create a copy of the loop variable to be used by the worker goroutine.
workerID := wid
g.Go(func() error {
// The label sets which this worker will write.
workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
// Signal that this worker is ready.
workerReadyWg.Done()
return whileNotCanceled(func() (bool, error) {
ts, ok := <-writerTsCh[workerID]
if !ok {
return false, nil
}
app := head.Appender(ctx)
for i := range workerLabelSets {
// We also use the timestamp as the sample value.
_, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts))
if err != nil {
						return false, fmt.Errorf("error when appending to head: %w", err)
}
}
return true, app.Commit()
})
})
}
// queryHead is a helper to query the head for a given time range and labelset.
queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
if err != nil {
return nil, err
}
return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
}
	// readerTsCh will be used by the coordinator goroutine to tell the readers which timestamps to read.
readerTsCh := make(chan uint64)
// Start the read workers.
for wid := range readConcurrency {
		// Create a copy of the loop variable to be used by the worker goroutine.
workerID := wid
g.Go(func() error {
querySeriesRef := (seriesCnt / readConcurrency) * workerID
// Signal that this worker is ready.
workerReadyWg.Done()
return whileNotCanceled(func() (bool, error) {
ts, ok := <-readerTsCh
if !ok {
return false, nil
}
querySeriesRef = (querySeriesRef + 1) % seriesCnt
lbls := labelSets[querySeriesRef]
// lbls has a single entry; extract it so we can run a query.
var lbl labels.Label
lbls.Range(func(l labels.Label) {
lbl = l
})
samples, err := queryHead(ts-qryRange, ts, lbl)
if err != nil {
return false, err
}
if len(samples) != 1 {
return false, fmt.Errorf("expected 1 series, got %d", len(samples))
}
series := lbls.String()
expectSampleCnt := qryRange/step + 1
if expectSampleCnt != uint64(len(samples[series])) {
return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
}
for sampleIdx, sample := range samples[series] {
expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
if sample.T() != int64(expectedValue) {
return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
}
if sample.F() != float64(expectedValue) {
return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
}
}
return true, nil
})
})
}
	// Start the coordinator goroutine.
g.Go(func() error {
currTs := startTs
defer func() {
			// End of the test; close all channels to stop the workers.
for _, ch := range writerTsCh {
close(ch)
}
close(readerTsCh)
}()
// Wait until all workers are ready to start the test.
workerReadyWg.Wait()
return whileNotCanceled(func() (bool, error) {
// Send the current timestamp to each of the writers.
for _, ch := range writerTsCh {
select {
case ch <- currTs:
case <-ctx.Done():
return false, nil
}
}
// Once data for at least <qryRange> has been ingested, send the current timestamp to the readers.
if currTs > startTs+qryRange {
select {
case readerTsCh <- currTs - step:
case <-ctx.Done():
return false, nil
}
}
currTs += step
if currTs > endTs {
return false, nil
}
return true, nil
})
})
require.NoError(t, g.Wait())
}
func TestHead_ReadWAL(t *testing.T) {
for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
entries := []any{
[]record.RefSeries{
{Ref: 10, Labels: labels.FromStrings("a", "1")},
{Ref: 11, Labels: labels.FromStrings("a", "2")},
{Ref: 100, Labels: labels.FromStrings("a", "3")},
},
[]record.RefSample{
{Ref: 0, T: 99, V: 1},
{Ref: 10, T: 100, V: 2},
{Ref: 100, T: 100, V: 3},
},
[]record.RefSeries{
{Ref: 50, Labels: labels.FromStrings("a", "4")},
// This series has two refs pointing to it.
{Ref: 101, Labels: labels.FromStrings("a", "3")},
},
[]record.RefSample{
{Ref: 10, T: 101, V: 5},
{Ref: 50, T: 101, V: 6},
// Sample for duplicate series record.
{Ref: 101, T: 101, V: 7},
},
[]tombstones.Stone{
{Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}},
// Tombstone for duplicate series record.
{Ref: 101, Intervals: []tombstones.Interval{{Mint: 0, Maxt: 100}}},
},
[]record.RefExemplar{
{Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("trace_id", "asdf")},
// Exemplar for duplicate series record.
{Ref: 101, T: 101, V: 7, Labels: labels.FromStrings("trace_id", "zxcv")},
},
[]record.RefMetadata{
// Metadata for duplicate series record.
{Ref: 101, Type: uint8(record.Counter), Unit: "foo", Help: "total foo"},
},
}
head, w := newTestHead(t, 1000, compress, false)
defer func() {
require.NoError(t, head.Close())
}()
populateTestWL(t, w, entries, nil)
require.NoError(t, head.Init(math.MinInt64))
require.Equal(t, uint64(101), head.lastSeriesID.Load())
s10 := head.series.getByID(10)
s11 := head.series.getByID(11)
s50 := head.series.getByID(50)
s100 := head.series.getByID(100)
s101 := head.series.getByID(101)
testutil.RequireEqual(t, labels.FromStrings("a", "1"), s10.lset)
require.Nil(t, s11) // Series without samples should be garbage collected at head.Init().
testutil.RequireEqual(t, labels.FromStrings("a", "4"), s50.lset)
testutil.RequireEqual(t, labels.FromStrings("a", "3"), s100.lset)
// Duplicate series record should not be written to the head.
require.Nil(t, s101)
// But it should have a WAL expiry set.
keepUntil, ok := head.getWALExpiry(101)
require.True(t, ok)
require.Equal(t, int64(101), keepUntil)
// Only the duplicate series record should have a WAL expiry set.
_, ok = head.getWALExpiry(50)
require.False(t, ok)
expandChunk := func(c chunkenc.Iterator) (x []sample) {
for c.Next() == chunkenc.ValFloat {
t, v := c.At()
x = append(x, sample{t: t, f: v})
}
require.NoError(t, c.Err())
return x
}
// Verify samples and exemplar for series 10.
c, _, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
q, err := head.ExemplarQuerier(context.Background())
require.NoError(t, err)
e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")})
require.NoError(t, err)
require.NotEmpty(t, e)
require.NotEmpty(t, e[0].Exemplars)
require.True(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("trace_id", "asdf")}.Equals(e[0].Exemplars[0]))
// Verify samples for series 50
c, _, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
// Verify records for series 100 and its duplicate, series 101.
// The samples before the new series record should be discarded since a duplicate record
// is only possible when old samples were compacted.
c, _, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
q, err = head.ExemplarQuerier(context.Background())
require.NoError(t, err)
e, err = q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "3")})
require.NoError(t, err)
require.NotEmpty(t, e)
require.NotEmpty(t, e[0].Exemplars)
require.True(t, exemplar.Exemplar{Ts: 101, Value: 7, Labels: labels.FromStrings("trace_id", "zxcv")}.Equals(e[0].Exemplars[0]))
require.NotNil(t, s100.meta)
require.Equal(t, "foo", s100.meta.Unit)
require.Equal(t, "total foo", s100.meta.Help)
intervals, err := head.tombstones.Get(storage.SeriesRef(s100.ref))
require.NoError(t, err)
require.Equal(t, tombstones.Intervals{{Mint: 0, Maxt: 100}}, intervals)
})
}
}
func TestHead_WALMultiRef(t *testing.T) {
head, w := newTestHead(t, 1000, compression.None, false)
require.NoError(t, head.Init(0))
app := head.Appender(context.Background())
ref1, err := app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
// Add another sample outside chunk range to mmap a chunk.
app = head.Appender(context.Background())
_, err = app.Append(0, labels.FromStrings("foo", "bar"), 1500, 2)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
require.NoError(t, head.Truncate(1600))
app = head.Appender(context.Background())
ref2, err := app.Append(0, labels.FromStrings("foo", "bar"), 1700, 3)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 3.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
// Add another sample outside chunk range to mmap a chunk.
app = head.Appender(context.Background())
_, err = app.Append(0, labels.FromStrings("foo", "bar"), 2000, 4)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
require.NotEqual(t, ref1, ref2, "Refs are the same")
require.NoError(t, head.Close())
w, err = wlog.New(nil, nil, w.Dir(), compression.None)
require.NoError(t, err)
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = head.opts.ChunkDirRoot
head, err = NewHead(nil, nil, w, nil, opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0))
defer func() {
require.NoError(t, head.Close())
}()
q, err := NewBlockQuerier(head, 0, 2100)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
// The samples before the new ref should be discarded since Head truncation
// happens only after compacting the Head.
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {
sample{1700, 3, nil, nil},
sample{2000, 4, nil, nil},
}}, series)
}
func TestHead_WALCheckpointMultiRef(t *testing.T) {
cases := []struct {
name string
walEntries []any
expectedWalExpiry int64
walTruncateMinT int64
expectedWalEntries []any
}{
{
name: "Samples only; keep needed duplicate series record",
walEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefSample{
{Ref: 1, T: 100, V: 1},
{Ref: 2, T: 200, V: 2},
{Ref: 2, T: 500, V: 3},
},
},
expectedWalExpiry: 500,
walTruncateMinT: 500,
expectedWalEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefSample{
{Ref: 2, T: 500, V: 3},
},
},
},
{
name: "Tombstones only; keep needed duplicate series record",
walEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]tombstones.Stone{
{Ref: 1, Intervals: []tombstones.Interval{{Mint: 0, Maxt: 100}}},
{Ref: 2, Intervals: []tombstones.Interval{{Mint: 0, Maxt: 200}}},
{Ref: 2, Intervals: []tombstones.Interval{{Mint: 0, Maxt: 500}}},
},
},
expectedWalExpiry: 500,
walTruncateMinT: 500,
expectedWalEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]tombstones.Stone{
{Ref: 2, Intervals: []tombstones.Interval{{Mint: 0, Maxt: 500}}},
},
},
},
{
name: "Exemplars only; keep needed duplicate series record",
walEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefExemplar{
{Ref: 1, T: 100, V: 1, Labels: labels.FromStrings("trace_id", "asdf")},
{Ref: 2, T: 200, V: 2, Labels: labels.FromStrings("trace_id", "asdf")},
{Ref: 2, T: 500, V: 3, Labels: labels.FromStrings("trace_id", "asdf")},
},
},
expectedWalExpiry: 500,
walTruncateMinT: 500,
expectedWalEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefExemplar{
{Ref: 2, T: 500, V: 3, Labels: labels.FromStrings("trace_id", "asdf")},
},
},
},
{
name: "Histograms only; keep needed duplicate series record",
walEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefHistogramSample{
{Ref: 1, T: 100, H: &histogram.Histogram{}},
{Ref: 2, T: 200, H: &histogram.Histogram{}},
{Ref: 2, T: 500, H: &histogram.Histogram{}},
},
},
expectedWalExpiry: 500,
walTruncateMinT: 500,
expectedWalEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefHistogramSample{
{Ref: 2, T: 500, H: &histogram.Histogram{}},
},
},
},
{
name: "Float histograms only; keep needed duplicate series record",
walEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefFloatHistogramSample{
{Ref: 1, T: 100, FH: &histogram.FloatHistogram{}},
{Ref: 2, T: 200, FH: &histogram.FloatHistogram{}},
{Ref: 2, T: 500, FH: &histogram.FloatHistogram{}},
},
},
expectedWalExpiry: 500,
walTruncateMinT: 500,
expectedWalEntries: []any{
[]record.RefSeries{
{Ref: 1, Labels: labels.FromStrings("a", "1")},
{Ref: 2, Labels: labels.FromStrings("a", "1")},
},
[]record.RefFloatHistogramSample{
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/querier_bench_test.go | tsdb/querier_bench_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/index"
)
// Make entries ~50B in size, to emulate real-world high cardinality.
const (
postingsBenchSuffix = "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"
)
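// The benchmarks in this file can be run with, for example (illustrative invocation):
//
//	go test -run='^$' -bench='BenchmarkQuerier$' ./tsdb/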
func BenchmarkQuerier(b *testing.B) {
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, h.Close())
}()
app := h.Appender(context.Background())
	addSeries := func(l labels.Labels) {
		_, err := app.Append(0, l, 0, 0)
		require.NoError(b, err)
	}
for n := range 10 {
addSeries(labels.FromStrings("a", strconv.Itoa(n)+postingsBenchSuffix))
for i := range 100000 {
addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
// Have some series that won't be matched, to properly test inverted matches.
addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "0_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "1_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "2_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
}
require.NoError(b, app.Commit())
app = h.Appender(context.Background())
}
require.NoError(b, app.Commit())
b.Run("Head", func(b *testing.B) {
ir, err := h.Index()
require.NoError(b, err)
defer func() {
require.NoError(b, ir.Close())
}()
b.Run("PostingsForMatchers", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
b.Run("labelValuesWithMatchers", func(b *testing.B) {
benchmarkLabelValuesWithMatchers(b, ir)
})
})
b.Run("Block", func(b *testing.B) {
blockdir := createBlockFromHead(b, b.TempDir(), h)
block, err := OpenBlock(nil, blockdir, nil, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
ir, err := block.Index()
require.NoError(b, err)
defer func() {
require.NoError(b, ir.Close())
}()
b.Run("PostingsForMatchers", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
b.Run("labelValuesWithMatchers", func(b *testing.B) {
benchmarkLabelValuesWithMatchers(b, ir)
})
})
}
func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
ctx := context.Background()
a1 := labels.MustNewMatcher(labels.MatchEqual, "a", "1"+postingsBenchSuffix)
n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)
n0_1 := labels.MustNewMatcher(labels.MatchEqual, "n", "0_1"+postingsBenchSuffix)
nX := labels.MustNewMatcher(labels.MatchEqual, "n", "X"+postingsBenchSuffix)
jFoo := labels.MustNewMatcher(labels.MatchEqual, "j", "foo")
jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")
iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")
i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.*")
iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*1")
iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*1.*")
iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+")
i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.+")
iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "")
iNotEmpty := labels.MustNewMatcher(labels.MatchNotEqual, "i", "")
iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "i", "2"+postingsBenchSuffix)
iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "2.*")
iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*2.*")
jFooBar := labels.MustNewMatcher(labels.MatchRegexp, "j", "foo|bar")
jXXXYYY := labels.MustNewMatcher(labels.MatchRegexp, "j", "XXX|YYY")
jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+")
iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]")
iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)")
iNotAlternate := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "(1|2|3|4|5|6|20|55)")
iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z")
iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z")
cases := []struct {
name string
matchers []*labels.Matcher
}{
{`n="1"`, []*labels.Matcher{n1}},
{`n="X"`, []*labels.Matcher{nX}},
{`n="1",j="foo"`, []*labels.Matcher{n1, jFoo}},
{`n="X",j="foo"`, []*labels.Matcher{nX, jFoo}},
{`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}},
{`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}},
{`n="1",i!="2"`, []*labels.Matcher{n1, iNot2}},
		{`n="0_1",j="foo",a="1"`, []*labels.Matcher{n0_1, jFoo, a1}},
{`n="X",j!="foo"`, []*labels.Matcher{nX, jNotFoo}},
{`i=~"1[0-9]",j=~"foo|bar"`, []*labels.Matcher{iCharSet, jFooBar}},
{`j=~"foo|bar"`, []*labels.Matcher{jFooBar}},
{`j=~"XXX|YYY"`, []*labels.Matcher{jXXXYYY}},
{`j=~"X.+"`, []*labels.Matcher{jXplus}},
{`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}},
{`i!~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iNotAlternate}},
{`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}},
{`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}},
{`i=~".*"`, []*labels.Matcher{iStar}},
{`i=~"1.*"`, []*labels.Matcher{i1Star}},
{`i=~".*1"`, []*labels.Matcher{iStar1}},
{`i=~".+"`, []*labels.Matcher{iPlus}},
{`i=~".+",j=~"X.+"`, []*labels.Matcher{iPlus, jXplus}},
{`i=~""`, []*labels.Matcher{iEmptyRe}},
{`i!=""`, []*labels.Matcher{iNotEmpty}},
{`n="1",i=~".*",j="foo"`, []*labels.Matcher{n1, iStar, jFoo}},
{`n="X",i=~".*",j="foo"`, []*labels.Matcher{nX, iStar, jFoo}},
{`n="1",i=~".*",i!="2",j="foo"`, []*labels.Matcher{n1, iStar, iNot2, jFoo}},
{`n="1",i!=""`, []*labels.Matcher{n1, iNotEmpty}},
{`n="1",i!="",j="foo"`, []*labels.Matcher{n1, iNotEmpty, jFoo}},
{`n="1",i!="",j=~"X.+"`, []*labels.Matcher{n1, iNotEmpty, jXplus}},
{`n="1",i!="",j=~"XXX|YYY"`, []*labels.Matcher{n1, iNotEmpty, jXXXYYY}},
{`n="1",i=~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iXYZ, jFoo}},
{`n="1",i!~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iNotXYZ, jFoo}},
{`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, jFoo}},
{`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}},
{`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}},
{`n="1",i=~".+",i!="2",j="foo"`, []*labels.Matcher{n1, iPlus, iNot2, jFoo}},
{`n="1",i=~".+",i!~"2.*",j="foo"`, []*labels.Matcher{n1, iPlus, iNot2Star, jFoo}},
{`n="1",i=~".+",i!~".*2.*",j="foo"`, []*labels.Matcher{n1, iPlus, iNotStar2Star, jFoo}},
{`n="X",i=~".+",i!~".*2.*",j="foo"`, []*labels.Matcher{nX, iPlus, iNotStar2Star, jFoo}},
}
for _, c := range cases {
b.Run(c.name, func(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
p, err := PostingsForMatchers(ctx, ir, c.matchers...)
require.NoError(b, err)
// Iterate over the postings
for p.Next() {
// Do nothing
}
}
})
}
}
func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
i1 := labels.MustNewMatcher(labels.MatchEqual, "i", "1")
i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.+")
i1PostingsBenchSuffix := labels.MustNewMatcher(labels.MatchEqual, "i", "1"+postingsBenchSuffix)
iSuffix := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+ddd")
iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")
jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")
jXXXYYY := labels.MustNewMatcher(labels.MatchRegexp, "j", "XXX|YYY")
jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+")
n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)
nX := labels.MustNewMatcher(labels.MatchNotEqual, "n", "X"+postingsBenchSuffix)
nPlus := labels.MustNewMatcher(labels.MatchRegexp, "n", ".+")
ctx := context.Background()
cases := []struct {
name string
labelName string
matchers []*labels.Matcher
}{
		// i is never "1", so this matches nothing.
{`i with i="1"`, "i", []*labels.Matcher{i1}},
// i has 100k values.
{`i with n="1"`, "i", []*labels.Matcher{n1}},
{`i with n=".+"`, "i", []*labels.Matcher{nPlus}},
{`i with n="1",j!="foo"`, "i", []*labels.Matcher{n1, jNotFoo}},
{`i with n="1",j=~"X.+"`, "i", []*labels.Matcher{n1, jXplus}},
{`i with n="1",j=~"XXX|YYY"`, "i", []*labels.Matcher{n1, jXXXYYY}},
{`i with n="X",j!="foo"`, "i", []*labels.Matcher{nX, jNotFoo}},
{`i with n="1",i=~".*",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}},
// matchers on i itself
{`i with i="1aaa...ddd"`, "i", []*labels.Matcher{i1PostingsBenchSuffix}},
{`i with i=~"1.+"`, "i", []*labels.Matcher{i1Plus}},
{`i with i=~"1.+",i=~".+ddd"`, "i", []*labels.Matcher{i1Plus, iSuffix}},
// n has 10 values.
{`n with j!="foo"`, "n", []*labels.Matcher{jNotFoo}},
{`n with i="1"`, "n", []*labels.Matcher{i1}},
{`n with i=~"1.+"`, "n", []*labels.Matcher{i1Plus}},
		// there's no label "none"
		{`none with i=~"1.+"`, "none", []*labels.Matcher{i1Plus}},
}
for _, c := range cases {
b.Run(c.name, func(b *testing.B) {
for b.Loop() {
_, err := labelValuesWithMatchers(ctx, ir, c.labelName, nil, c.matchers...)
require.NoError(b, err)
}
})
}
}
func BenchmarkMergedStringIter(b *testing.B) {
numSymbols := 100000
s := make([]string, numSymbols)
for i := range numSymbols {
s[i] = fmt.Sprintf("symbol%v", i)
}
for b.Loop() {
it := NewMergedStringIter(index.NewStringListIter(s), index.NewStringListIter(s))
for range 100 {
it = NewMergedStringIter(it, index.NewStringListIter(s))
}
for it.Next() {
require.NotNil(b, it.At())
require.NoError(b, it.Err())
}
}
b.ReportAllocs()
}
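// createHeadForBenchmarkSelect opens a DB in a temporary directory with
// out-of-order appends enabled (OutOfOrderCapMax=255, OutOfOrderTimeWindow=1000)
// and fills its Head with numSeries series via the addSeries callback,
// committing every 1000 series so the appender doesn't grow too big.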
func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(app storage.Appender, i int)) (*Head, *DB) {
dir := b.TempDir()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 255
opts.OutOfOrderTimeWindow = 1000
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(b, err)
b.Cleanup(func() {
require.NoError(b, db.Close())
})
h := db.Head()
app := h.Appender(context.Background())
for i := range numSeries {
addSeries(app, i)
if i%1000 == 999 { // Commit every so often, so the appender doesn't get too big.
require.NoError(b, app.Commit())
app = h.Appender(context.Background())
}
}
require.NoError(b, app.Commit())
return h, db
}
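// benchmarkSelect measures Select over growing subsets of the data: the "NofM"
// sub-benchmark queries the time range [0, N-1], which (for the workloads in
// this file, where series i receives its samples at timestamps around i)
// touches N of the M series.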
func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, sorted bool) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
b.ResetTimer()
for s := 1; s <= numSeries; s *= 10 {
b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
q, err := queryable.Querier(0, int64(s-1))
require.NoError(b, err)
b.ResetTimer()
for b.Loop() {
ss := q.Select(context.Background(), sorted, nil, matcher)
for ss.Next() {
}
require.NoError(b, ss.Err())
}
q.Close()
})
}
}
func BenchmarkQuerierSelect(b *testing.B) {
numSeries := 1000000
h, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
_, err := app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
if err != nil {
b.Fatal(err)
}
})
b.Run("Head", func(b *testing.B) {
benchmarkSelect(b, db, numSeries, false)
})
b.Run("SortedHead", func(b *testing.B) {
benchmarkSelect(b, db, numSeries, true)
})
b.Run("Block", func(b *testing.B) {
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
benchmarkSelect(b, (*queryableBlock)(block), numSeries, false)
})
}
// Type wrapper to let a Block be a Queryable in benchmarkSelect().
type queryableBlock Block
func (pb *queryableBlock) Querier(mint, maxt int64) (storage.Querier, error) {
return NewBlockQuerier((*Block)(pb), mint, maxt)
}
func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) {
numSeries := 1000000
_, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
l := labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix))
ref, err := app.Append(0, l, int64(i+1), 0)
if err != nil {
b.Fatal(err)
}
_, err = app.Append(ref, l, int64(i), 1) // Out of order sample
if err != nil {
b.Fatal(err)
}
})
b.Run("Head", func(b *testing.B) {
benchmarkSelect(b, db, numSeries, false)
})
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_append.go | tsdb/head_append.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
)
// initAppender is a helper to initialize the time bounds of the head
// upon the first sample it receives.
type initAppender struct {
app storage.Appender
head *Head
}
var _ storage.GetRef = &initAppender{}
func (a *initAppender) SetOptions(opts *storage.AppendOptions) {
if a.app != nil {
a.app.SetOptions(opts)
}
}
func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.Append(ref, lset, t, v)
}
a.head.initTime(t)
a.app = a.head.appender()
return a.app.Append(ref, lset, t, v)
}
func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
// Check if exemplar storage is enabled.
if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 {
return 0, nil
}
if a.app != nil {
return a.app.AppendExemplar(ref, l, e)
}
	// We should never reach here, given Append would be called before AppendExemplar,
	// and we want to always base the head/WAL min time on sample times rather than exemplar times.
a.head.initTime(e.Ts)
a.app = a.head.appender()
return a.app.AppendExemplar(ref, l, e)
}
func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.AppendHistogram(ref, l, t, h, fh)
}
a.head.initTime(t)
a.app = a.head.appender()
return a.app.AppendHistogram(ref, l, t, h, fh)
}
func (a *initAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.AppendHistogramSTZeroSample(ref, l, t, st, h, fh)
}
a.head.initTime(t)
a.app = a.head.appender()
return a.app.AppendHistogramSTZeroSample(ref, l, t, st, h, fh)
}
func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.UpdateMetadata(ref, l, m)
}
a.app = a.head.appender()
return a.app.UpdateMetadata(ref, l, m)
}
func (a *initAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, st int64) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.AppendSTZeroSample(ref, lset, t, st)
}
a.head.initTime(t)
a.app = a.head.appender()
return a.app.AppendSTZeroSample(ref, lset, t, st)
}
// initTime initializes a head with the first timestamp. This only needs to be called
// for a completely fresh head with an empty WAL.
func (h *Head) initTime(t int64) {
if !h.minTime.CompareAndSwap(math.MaxInt64, t) {
return
}
// Ensure that max time is initialized to at least the min time we just set.
// Concurrent appenders may already have set it to a higher value.
h.maxTime.CompareAndSwap(math.MinInt64, t)
}
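// For example (illustrative): on a fresh head (minTime=MaxInt64, maxTime=MinInt64),
// the first appended sample at t=1000 sets both minTime and maxTime to 1000; any
// later initTime call is a no-op because the CompareAndSwap no longer matches.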
func (a *initAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
if g, ok := a.app.(storage.GetRef); ok {
return g.GetRef(lset, hash)
}
return 0, labels.EmptyLabels()
}
func (a *initAppender) Commit() error {
if a.app == nil {
a.head.metrics.activeAppenders.Dec()
return nil
}
return a.app.Commit()
}
func (a *initAppender) Rollback() error {
if a.app == nil {
a.head.metrics.activeAppenders.Dec()
return nil
}
return a.app.Rollback()
}
// Appender returns a new Appender on the database.
func (h *Head) Appender(context.Context) storage.Appender {
h.metrics.activeAppenders.Inc()
// The head cache might not have a starting point yet. The init appender
// picks up the first appended timestamp as the base.
if !h.initialized() {
return &initAppender{
head: h,
}
}
return h.appender()
}
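// Typical usage of Head.Appender (a sketch; error handling elided, ts/v are placeholders):
//
//	app := head.Appender(ctx)
//	ref, _ := app.Append(0, labels.FromStrings("job", "demo"), ts, v)
//	_, _ = app.Append(ref, labels.EmptyLabels(), ts+15000, v2) // The returned ref can be reused for later samples.
//	_ = app.Commit() // Or app.Rollback() to discard everything appended.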
func (h *Head) appender() *headAppender {
minValidTime := h.appendableMinValidTime()
appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.
return &headAppender{
headAppenderBase: headAppenderBase{
head: h,
minValidTime: minValidTime,
mint: math.MaxInt64,
maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
seriesRefs: h.getRefSeriesBuffer(),
series: h.getSeriesBuffer(),
typesInBatch: h.getTypeMap(),
appendID: appendID,
cleanupAppendIDsBelow: cleanupAppendIDsBelow,
},
}
}
// appendableMinValidTime returns the minimum valid timestamp for appends,
// such that samples stay ahead of prior blocks and the head compaction window.
func (h *Head) appendableMinValidTime() int64 {
// This boundary ensures that no samples will be added to the compaction window.
// This allows race-free, concurrent appending and compaction.
cwEnd := h.MaxTime() - h.chunkRange.Load()/2
// This boundary ensures that we avoid overlapping timeframes from one block to the next.
// While not necessary for correctness, it means we're not required to use vertical compaction.
minValid := h.minValidTime.Load()
return max(cwEnd, minValid)
}
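// For example (illustrative numbers): with MaxTime()=10000, chunkRange=4000 and
// minValidTime=0, cwEnd = 10000 - 2000 = 8000, so appends below t=8000 are
// rejected unless the out-of-order window admits them.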
// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head.
// Returns false if Head hasn't been initialized yet and the minimum time isn't known yet.
func (h *Head) AppendableMinValidTime() (int64, bool) {
if !h.initialized() {
return 0, false
}
return h.appendableMinValidTime(), true
}
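// The get*/put* helpers below implement simple pooling of the appender's scratch
// slices and maps: getters fall back to allocating a fresh buffer when the pool
// is empty, and putters truncate to zero length (or clear the map) so the backing
// storage can be reused by the next appender.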
func (h *Head) getRefSeriesBuffer() []record.RefSeries {
b := h.refSeriesPool.Get()
if b == nil {
return make([]record.RefSeries, 0, 512)
}
return b
}
func (h *Head) putRefSeriesBuffer(b []record.RefSeries) {
h.refSeriesPool.Put(b[:0])
}
func (h *Head) getFloatBuffer() []record.RefSample {
b := h.floatsPool.Get()
if b == nil {
return make([]record.RefSample, 0, 512)
}
return b
}
func (h *Head) putFloatBuffer(b []record.RefSample) {
h.floatsPool.Put(b[:0])
}
func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
b := h.exemplarsPool.Get()
if b == nil {
return make([]exemplarWithSeriesRef, 0, 512)
}
return b
}
func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
if b == nil {
return
}
for i := range b { // Zero out to avoid retaining label data.
b[i].exemplar.Labels = labels.EmptyLabels()
}
h.exemplarsPool.Put(b[:0])
}
func (h *Head) getHistogramBuffer() []record.RefHistogramSample {
b := h.histogramsPool.Get()
if b == nil {
return make([]record.RefHistogramSample, 0, 512)
}
return b
}
func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
h.histogramsPool.Put(b[:0])
}
func (h *Head) getFloatHistogramBuffer() []record.RefFloatHistogramSample {
b := h.floatHistogramsPool.Get()
if b == nil {
return make([]record.RefFloatHistogramSample, 0, 512)
}
return b
}
func (h *Head) putFloatHistogramBuffer(b []record.RefFloatHistogramSample) {
h.floatHistogramsPool.Put(b[:0])
}
func (h *Head) getMetadataBuffer() []record.RefMetadata {
b := h.metadataPool.Get()
if b == nil {
return make([]record.RefMetadata, 0, 512)
}
return b
}
func (h *Head) putMetadataBuffer(b []record.RefMetadata) {
h.metadataPool.Put(b[:0])
}
func (h *Head) getSeriesBuffer() []*memSeries {
b := h.seriesPool.Get()
if b == nil {
return make([]*memSeries, 0, 512)
}
return b
}
func (h *Head) putSeriesBuffer(b []*memSeries) {
for i := range b { // Zero out to avoid retaining data.
b[i] = nil
}
h.seriesPool.Put(b[:0])
}
func (h *Head) getTypeMap() map[chunks.HeadSeriesRef]sampleType {
b := h.typeMapPool.Get()
if b == nil {
return make(map[chunks.HeadSeriesRef]sampleType)
}
return b
}
func (h *Head) putTypeMap(b map[chunks.HeadSeriesRef]sampleType) {
clear(b)
h.typeMapPool.Put(b)
}
func (h *Head) getBytesBuffer() []byte {
b := h.bytesPool.Get()
if b == nil {
return make([]byte, 0, 1024)
}
return b
}
func (h *Head) putBytesBuffer(b []byte) {
h.bytesPool.Put(b[:0])
}
type exemplarWithSeriesRef struct {
ref storage.SeriesRef
exemplar exemplar.Exemplar
}
// sampleType describes sample types we need to distinguish for append batching.
// We need separate types for everything that goes into a different WAL record
// type or into a different chunk encoding.
type sampleType byte
const (
stNone sampleType = iota // To mark that the sample type does not matter.
stFloat // All simple floats (counters, gauges, untyped). Goes to `floats`.
stHistogram // Native integer histograms with a standard exponential schema. Goes to `histograms`.
stCustomBucketHistogram // Native integer histograms with custom bucket boundaries. Goes to `histograms`.
stFloatHistogram // Native float histograms. Goes to `floatHistograms`.
stCustomBucketFloatHistogram // Native float histograms with custom bucket boundaries. Goes to `floatHistograms`.
)
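// Illustrative batching behavior (a sketch of getCurrentBatch below for a single
// series within one appender, not an exhaustive specification):
//
//	append float             -> batch 1 (floats are implicit, not tracked in typesInBatch)
//	append integer histogram -> batch 1 (a histogram following floats can stay in the batch)
//	append float histogram   -> batch 2 (a different histogram type starts a new batch)
//	append float             -> batch 3 (a float following a histogram starts a new batch)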
// appendBatch is used to partition all the appended data into batches that are
// "type clean", i.e. every series receives only samples of one type within the
// batch. Types in this regard are defined by the sampleType enum above.
// TODO(beorn7): The same concept could be extended to make sure every series in
// the batch has at most one metadata record. This is currently not implemented
// because it is unclear if it is needed at all. (Maybe we will remove metadata
// records altogether, see issue #15911.)
type appendBatch struct {
floats []record.RefSample // New float samples held by this appender.
floatSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
histograms []record.RefHistogramSample // New histogram samples held by this appender.
histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
floatHistograms []record.RefFloatHistogramSample // New float histogram samples held by this appender.
floatHistogramSeries []*memSeries // FloatHistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
metadata []record.RefMetadata // New metadata held by this appender.
metadataSeries []*memSeries // Series corresponding to the metadata held by this appender.
exemplars []exemplarWithSeriesRef // New exemplars held by this appender.
}
// close returns all the slices to the pools in Head and nil's them.
func (b *appendBatch) close(h *Head) {
h.putFloatBuffer(b.floats)
b.floats = nil
h.putSeriesBuffer(b.floatSeries)
b.floatSeries = nil
h.putHistogramBuffer(b.histograms)
b.histograms = nil
h.putSeriesBuffer(b.histogramSeries)
b.histogramSeries = nil
h.putFloatHistogramBuffer(b.floatHistograms)
b.floatHistograms = nil
h.putSeriesBuffer(b.floatHistogramSeries)
b.floatHistogramSeries = nil
h.putMetadataBuffer(b.metadata)
b.metadata = nil
h.putSeriesBuffer(b.metadataSeries)
b.metadataSeries = nil
h.putExemplarBuffer(b.exemplars)
b.exemplars = nil
}
type headAppenderBase struct {
head *Head
minValidTime int64 // No samples below this timestamp are allowed.
mint, maxt int64
headMaxt int64 // We track it here to not take the lock for every sample appended.
oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
seriesRefs []record.RefSeries // New series records held by this appender.
	series                          []*memSeries                        // New series held by this appender (using corresponding slice indexes from seriesRefs).
batches []*appendBatch // Holds all the other data to append. (In regular cases, there should be only one of these.)
typesInBatch map[chunks.HeadSeriesRef]sampleType // Which (one) sample type each series holds in the most recent batch.
appendID, cleanupAppendIDsBelow uint64
closed bool
}
type headAppender struct {
headAppenderBase
hints *storage.AppendOptions
}
func (a *headAppender) SetOptions(opts *storage.AppendOptions) {
a.hints = opts
}
func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise a full check will be done later to decide if the sample is in-order or out-of-order.
if a.oooTimeWindow == 0 && t < a.minValidTime {
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
return 0, storage.ErrOutOfBounds
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
s, _, err = a.getOrCreate(lset)
if err != nil {
return 0, err
}
}
if value.IsStaleNaN(v) {
// If we have added a sample before with this same appender, we
// can check the previously used type and turn a stale float
// sample into a stale histogram sample or stale float histogram
// sample as appropriate. This prevents an unnecessary creation
// of a new batch. However, since other appenders might append
// to the same series concurrently, this is not perfect but just
// an optimization for the more likely case.
switch a.typesInBatch[s.ref] {
case stHistogram, stCustomBucketHistogram:
return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
case stFloatHistogram, stCustomBucketFloatHistogram:
return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
}
// Note that a series reference not yet in the map will come out
// as stNone, but since we do not handle that case separately,
// we do not need to check for the difference between "unknown
// series" and "known series with stNone".
}
s.Lock()
defer s.Unlock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err == nil {
if isOOO && a.hints != nil && a.hints.DiscardOutOfOrder {
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
return 0, storage.ErrOutOfOrderSample
}
s.pendingCommit = true
}
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderSample):
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
case errors.Is(err, storage.ErrTooOldSample):
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
}
return 0, err
}
if t < a.mint {
a.mint = t
}
if t > a.maxt {
a.maxt = t
}
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{
Ref: s.ref,
T: t,
V: v,
})
b.floatSeries = append(b.floatSeries, s)
return storage.SeriesRef(s.ref), nil
}
// AppendSTZeroSample appends a synthetic zero sample at the st timestamp. It returns
// an error when the sample can't be appended. See
// storage.StartTimestampAppender.AppendSTZeroSample for further documentation.
func (a *headAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, st int64) (storage.SeriesRef, error) {
if st >= t {
return 0, storage.ErrSTNewerThanSample
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
s, _, err = a.getOrCreate(lset)
if err != nil {
return 0, err
}
}
	// Check that the ST wouldn't be out-of-order vs. samples we might already have for this series.
	// NOTE(bwplotka): This will often be hit, as long-lived counters are expected
	// to share the same ST.
s.Lock()
isOOO, _, err := s.appendable(st, 0, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if err != nil {
return 0, err
}
if isOOO {
return storage.SeriesRef(s.ref), storage.ErrOutOfOrderST
}
if st > a.maxt {
a.maxt = st
}
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: st, V: 0.0})
b.floatSeries = append(b.floatSeries, s)
return storage.SeriesRef(s.ref), nil
}
func (a *headAppenderBase) getOrCreate(lset labels.Labels) (s *memSeries, created bool, err error) {
// Ensure no empty labels have gotten through.
lset = lset.WithoutEmpty()
if lset.IsEmpty() {
return nil, false, fmt.Errorf("empty labelset: %w", ErrInvalidSample)
}
if l, dup := lset.HasDuplicateLabelNames(); dup {
return nil, false, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample)
}
s, created, err = a.head.getOrCreate(lset.Hash(), lset, true)
if err != nil {
return nil, false, err
}
if created {
a.seriesRefs = append(a.seriesRefs, record.RefSeries{
Ref: s.ref,
Labels: lset,
})
a.series = append(a.series, s)
}
return s, created, nil
}
// getCurrentBatch returns the current batch if it fits the provided sampleType
// for the provided series. Otherwise, it adds a new batch and returns it.
func (a *headAppenderBase) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef) *appendBatch {
h := a.head
newBatch := func() *appendBatch {
b := appendBatch{
floats: h.getFloatBuffer(),
floatSeries: h.getSeriesBuffer(),
histograms: h.getHistogramBuffer(),
histogramSeries: h.getSeriesBuffer(),
floatHistograms: h.getFloatHistogramBuffer(),
floatHistogramSeries: h.getSeriesBuffer(),
metadata: h.getMetadataBuffer(),
metadataSeries: h.getSeriesBuffer(),
}
// Allocate the exemplars buffer only if exemplars are enabled.
if h.opts.EnableExemplarStorage {
b.exemplars = h.getExemplarBuffer()
}
clear(a.typesInBatch)
switch st {
case stHistogram, stFloatHistogram, stCustomBucketHistogram, stCustomBucketFloatHistogram:
// We only record histogram sample types in the map.
// Floats are implicit.
a.typesInBatch[s] = st
}
a.batches = append(a.batches, &b)
return &b
}
// First batch ever. Create it.
if len(a.batches) == 0 {
return newBatch()
}
	// TODO(beorn7): If we ever see that the a.typesInBatch map grows so
	// large that it matters for total memory consumption, we could limit
	// the batch size here, i.e. cut a new batch even without a type change.
	// Something like:
	//
	//	if len(a.typesInBatch) > limit {
	//		return newBatch()
	//	}
lastBatch := a.batches[len(a.batches)-1]
if st == stNone {
// Type doesn't matter, last batch will always do.
return lastBatch
}
prevST, ok := a.typesInBatch[s]
switch {
case prevST == st:
// An old series of some histogram type with the same type being appended.
// Continue the batch.
return lastBatch
case !ok && st == stFloat:
// A new float series, or an old float series that gets floats appended.
// Note that we do not track stFloat in typesInBatch.
// Continue the batch.
return lastBatch
case st == stFloat:
// A float being appended to a histogram series.
// Start a new batch.
return newBatch()
case !ok:
// A new series of some histogram type, or some histogram type
		// being appended to an old float series. Even in the latter
// case, we don't need to start a new batch because histograms
// after floats are fine.
// Add new sample type to the map and continue batch.
a.typesInBatch[s] = st
return lastBatch
default:
// One histogram type changed to another.
// Start a new batch.
return newBatch()
}
}
// appendable checks whether the given sample is valid for appending to the series.
// If the sample is valid and in-order, it returns false with no error.
// If the sample belongs to the out-of-order chunk, it returns true with no error.
// If the sample cannot be handled, it returns an error.
func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) {
// Check if we can append in the in-order chunk.
if t >= minValidTime {
if s.headChunks == nil {
// The series has no sample and was freshly created.
return false, 0, nil
}
msMaxt := s.maxTime()
if t > msMaxt {
return false, 0, nil
}
if t == msMaxt {
			// We allow exact duplicates, as we can encounter them in valid cases
			// (like federation), and erroring out in such cases would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if s.lastHistogramValue != nil || s.lastFloatHistogramValue != nil {
return false, 0, storage.NewDuplicateHistogramToFloatErr(t, v)
}
if math.Float64bits(s.lastValue) != math.Float64bits(v) {
return false, 0, storage.NewDuplicateFloatErr(t, s.lastValue, v)
}
// Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf.
return false, 0, nil
}
}
// The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk.
if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
return true, headMaxt - t, nil
}
	// The sample can go in neither the in-order chunk nor the out-of-order chunk.
if oooTimeWindow > 0 {
return true, headMaxt - t, storage.ErrTooOldSample
}
if t < minValidTime {
return false, headMaxt - t, storage.ErrOutOfBounds
}
return false, headMaxt - t, storage.ErrOutOfOrderSample
}
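// Decision summary for appendable above (illustrative):
//
//	t newer than the series' max time           -> in-order append
//	t equal to max time, identical float value  -> accepted, treated as in-order
//	t equal to max time, conflicting value/type -> duplicate-sample error
//	t within the out-of-order time window       -> out-of-order append
//	OOO enabled but t too old for the window    -> storage.ErrTooOldSample
//	OOO disabled and t < minValidTime           -> storage.ErrOutOfBounds
//	otherwise                                   -> storage.ErrOutOfOrderSample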
// appendableHistogram checks whether the given histogram sample is valid for appending to the series.
// If the sample is valid and in-order, it returns false with no error.
// If the sample belongs to the out-of-order chunk, it returns true with no error.
// If the sample cannot be handled, it returns an error.
func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) {
// Check if we can append in the in-order chunk.
if t >= minValidTime {
if s.headChunks == nil {
// The series has no sample and was freshly created.
return false, 0, nil
}
msMaxt := s.maxTime()
if t > msMaxt {
return false, 0, nil
}
if t == msMaxt {
			// We allow exact duplicates, as we can encounter them in valid cases
			// (like federation), and erroring out in such cases would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if !h.Equals(s.lastHistogramValue) {
return false, 0, storage.ErrDuplicateSampleForTimestamp
}
// Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf.
return false, 0, nil
}
}
// The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk.
if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
return true, headMaxt - t, nil
}
	// The sample can go in neither the in-order chunk nor the out-of-order chunk.
if oooTimeWindow > 0 {
return true, headMaxt - t, storage.ErrTooOldSample
}
if t < minValidTime {
return false, headMaxt - t, storage.ErrOutOfBounds
}
return false, headMaxt - t, storage.ErrOutOfOrderSample
}
// appendableFloatHistogram checks whether the given float histogram sample is valid for appending to the series.
// If the sample is valid and in-order, it returns false with no error.
// If the sample belongs to the out-of-order chunk, it returns true with no error.
// If the sample cannot be handled, it returns an error.
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) {
// Check if we can append in the in-order chunk.
if t >= minValidTime {
if s.headChunks == nil {
// The series has no sample and was freshly created.
return false, 0, nil
}
msMaxt := s.maxTime()
if t > msMaxt {
return false, 0, nil
}
if t == msMaxt {
			// We allow exact duplicates, as we can encounter them in valid cases
			// (like federation), and erroring out in such cases would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if !fh.Equals(s.lastFloatHistogramValue) {
return false, 0, storage.ErrDuplicateSampleForTimestamp
}
// Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf.
return false, 0, nil
}
}
// The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk.
if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
return true, headMaxt - t, nil
}
	// The sample can go in neither the in-order chunk nor the out-of-order chunk.
if oooTimeWindow > 0 {
return true, headMaxt - t, storage.ErrTooOldSample
}
if t < minValidTime {
return false, headMaxt - t, storage.ErrOutOfBounds
}
return false, headMaxt - t, storage.ErrOutOfOrderSample
}
// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset validity checks that Append does.
func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
// Check if exemplar storage is enabled.
if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 {
return 0, nil
}
// Get Series
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
s = a.head.series.getByHash(lset.Hash(), lset)
if s != nil {
ref = storage.SeriesRef(s.ref)
}
}
if s == nil {
return 0, fmt.Errorf("unknown HeadSeriesRef when trying to add exemplar: %d", ref)
}
// Ensure no empty labels have gotten through.
e.Labels = e.Labels.WithoutEmpty()
err := a.head.exemplars.ValidateExemplar(s.labels(), e)
if err != nil {
if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) {
// Duplicate, don't return an error but don't accept the exemplar.
return 0, nil
}
return 0, err
}
b := a.getCurrentBatch(stNone, chunks.HeadSeriesRef(ref))
b.exemplars = append(b.exemplars, exemplarWithSeriesRef{ref, e})
return storage.SeriesRef(s.ref), nil
}
func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise a full check will be done later to decide if the sample is in-order or out-of-order.
if a.oooTimeWindow == 0 && t < a.minValidTime {
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
return 0, storage.ErrOutOfBounds
}
if h != nil {
if err := h.Validate(); err != nil {
return 0, err
}
}
if fh != nil {
if err := fh.Validate(); err != nil {
return 0, err
}
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
s, _, err = a.getOrCreate(lset)
if err != nil {
return 0, err
}
}
switch {
case h != nil:
s.Lock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderSample):
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
case errors.Is(err, storage.ErrTooOldSample):
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
}
return 0, err
}
st := stHistogram
if h.UsesCustomBuckets() {
st = stCustomBucketHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: s.ref,
T: t,
H: h,
})
b.histogramSeries = append(b.histogramSeries, s)
case fh != nil:
s.Lock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderSample):
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
case errors.Is(err, storage.ErrTooOldSample):
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
}
return 0, err
}
st := stFloatHistogram
if fh.UsesCustomBuckets() {
st = stCustomBucketFloatHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref,
T: t,
FH: fh,
})
b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
if t < a.mint {
a.mint = t
}
if t > a.maxt {
a.maxt = t
}
return storage.SeriesRef(s.ref), nil
}
func (a *headAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if st >= t {
return 0, storage.ErrSTNewerThanSample
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
s, _, err = a.getOrCreate(lset)
if err != nil {
return 0, err
}
}
switch {
case h != nil:
zeroHistogram := &histogram.Histogram{
// The STZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
CustomValues: h.CustomValues,
}
s.Lock()
		// For STZeroSamples OOO is not allowed; the appendable check below mirrors
		// the float implementation, and any out-of-order result is rejected.
isOOO, _, err := s.appendableHistogram(st, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil {
s.Unlock()
if errors.Is(err, storage.ErrOutOfOrderSample) {
return 0, storage.ErrOutOfOrderST
}
return 0, err
}
// OOO is not allowed because after the first scrape, ST will be the same for most (if not all) future samples.
// This is to prevent the injected zero from being marked as OOO forever.
if isOOO {
s.Unlock()
return 0, storage.ErrOutOfOrderST
}
s.pendingCommit = true
s.Unlock()
sTyp := stHistogram
if h.UsesCustomBuckets() {
sTyp = stCustomBucketHistogram
}
b := a.getCurrentBatch(sTyp, s.ref)
b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: s.ref,
T: st,
H: zeroHistogram,
})
b.histogramSeries = append(b.histogramSeries, s)
case fh != nil:
zeroFloatHistogram := &histogram.FloatHistogram{
// The STZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
CustomValues: fh.CustomValues,
}
s.Lock()
// For STZeroSamples OOO is not allowed; we still run the full appendable check to stay close to the float implementation.
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_append_v2.go | tsdb/head_append_v2.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
)
// initAppenderV2 is a helper to initialize the time bounds of the head
// upon the first sample it receives.
type initAppenderV2 struct {
app storage.AppenderV2
head *Head
}
var _ storage.GetRef = &initAppenderV2{}
func (a *initAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
if a.app == nil {
a.head.initTime(t)
a.app = a.head.appenderV2()
}
return a.app.Append(ref, ls, st, t, v, h, fh, opts)
}
func (a *initAppenderV2) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
if g, ok := a.app.(storage.GetRef); ok {
return g.GetRef(lset, hash)
}
return 0, labels.EmptyLabels()
}
func (a *initAppenderV2) Commit() error {
if a.app == nil {
a.head.metrics.activeAppenders.Dec()
return nil
}
return a.app.Commit()
}
func (a *initAppenderV2) Rollback() error {
if a.app == nil {
a.head.metrics.activeAppenders.Dec()
return nil
}
return a.app.Rollback()
}
// AppenderV2 returns a new AppenderV2 on the database.
func (h *Head) AppenderV2(context.Context) storage.AppenderV2 {
h.metrics.activeAppenders.Inc()
// The head cache might not have a starting point yet. The init appender
// picks up the first appended timestamp as the base.
if !h.initialized() {
return &initAppenderV2{
head: h,
}
}
return h.appenderV2()
}
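// Illustrative AppenderV2 lifecycle, a minimal sketch assuming an already-open
// Head h and a timestamp ts in scope:
//
//	app := h.AppenderV2(context.Background())
//	ref, err := app.Append(0, labels.FromStrings("__name__", "demo"), 0, ts, 42.0, nil, nil, storage.AOptions{})
//	if err != nil {
//		_ = app.Rollback() // Drop everything buffered by this appender.
//	} else {
//		_ = app.Commit() // Publish the buffered data under the isolation rules.
//	}
//	_ = ref // May be passed back to Append to skip the series lookup by labels.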
func (h *Head) appenderV2() *headAppenderV2 {
minValidTime := h.appendableMinValidTime()
appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.
return &headAppenderV2{
headAppenderBase: headAppenderBase{
head: h,
minValidTime: minValidTime,
mint: math.MaxInt64,
maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
seriesRefs: h.getRefSeriesBuffer(),
series: h.getSeriesBuffer(),
typesInBatch: h.getTypeMap(),
appendID: appendID,
cleanupAppendIDsBelow: cleanupAppendIDsBelow,
},
}
}
type headAppenderV2 struct {
headAppenderBase
}
func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
var (
// Avoid shadowing err variables for reliability.
valErr, appErr, partialErr error
sampleMetricType = sampleMetricTypeFloat
isStale bool
)
// Fail fast on incorrect histograms.
switch {
case fh != nil:
sampleMetricType = sampleMetricTypeHistogram
valErr = fh.Validate()
case h != nil:
sampleMetricType = sampleMetricTypeHistogram
valErr = h.Validate()
}
if valErr != nil {
return 0, valErr
}
// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise, a full check will be done later to decide if the sample is in-order or out-of-order.
if a.oooTimeWindow == 0 && t < a.minValidTime {
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricType).Inc()
return 0, storage.ErrOutOfBounds
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
s, _, err = a.getOrCreate(ls)
if err != nil {
return 0, err
}
}
// TODO(bwplotka): Handle ST natively (as per PROM-60).
if a.head.opts.EnableSTAsZeroSample && st != 0 {
a.bestEffortAppendSTZeroSample(s, ls, st, t, h, fh)
}
switch {
case fh != nil:
isStale = value.IsStaleNaN(fh.Sum)
appErr = a.appendFloatHistogram(s, t, fh, opts.RejectOutOfOrder)
case h != nil:
isStale = value.IsStaleNaN(h.Sum)
appErr = a.appendHistogram(s, t, h, opts.RejectOutOfOrder)
default:
isStale = value.IsStaleNaN(v)
if isStale {
// If we have added a sample before with this same appender, we
// can check the previously used type and turn a stale float
// sample into a stale histogram sample or stale float histogram
// sample as appropriate. This prevents an unnecessary creation
// of a new batch. However, since other appenders might append
// to the same series concurrently, this is not perfect but just
// an optimization for the more likely case.
switch a.typesInBatch[s.ref] {
case stHistogram, stCustomBucketHistogram:
return a.Append(storage.SeriesRef(s.ref), ls, st, t, 0, &histogram.Histogram{Sum: v}, nil, storage.AOptions{
RejectOutOfOrder: opts.RejectOutOfOrder,
})
case stFloatHistogram, stCustomBucketFloatHistogram:
return a.Append(storage.SeriesRef(s.ref), ls, st, t, 0, nil, &histogram.FloatHistogram{Sum: v}, storage.AOptions{
RejectOutOfOrder: opts.RejectOutOfOrder,
})
}
// Note that a series reference not yet in the map will come out
// as stNone, but since we do not handle that case separately,
// we do not need to check for the difference between "unknown
// series" and "known series with stNone".
}
appErr = a.appendFloat(s, t, v, opts.RejectOutOfOrder)
}
// Handle append error, if any.
if appErr != nil {
switch {
case errors.Is(appErr, storage.ErrOutOfOrderSample):
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricType).Inc()
case errors.Is(appErr, storage.ErrTooOldSample):
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricType).Inc()
}
return 0, appErr
}
if t < a.mint {
a.mint = t
}
if t > a.maxt {
a.maxt = t
}
if isStale {
// For stale values we never attempt to process metadata/exemplars; just claim success.
return storage.SeriesRef(s.ref), nil
}
// Append exemplars if any and if storage was configured for it.
if len(opts.Exemplars) > 0 && a.head.opts.EnableExemplarStorage && a.head.opts.MaxExemplars.Load() > 0 {
// Currently only exemplars can return partial errors.
partialErr = a.appendExemplars(s, opts.Exemplars)
}
// TODO(bwplotka): Move/reuse metadata tests from scrape, once scrape adopts AppenderV2.
// Currently tsdb package does not test metadata.
if a.head.opts.EnableMetadataWALRecords && !opts.Metadata.IsEmpty() {
s.Lock()
metaChanged := s.meta == nil || !s.meta.Equals(opts.Metadata)
s.Unlock()
if metaChanged {
b := a.getCurrentBatch(stNone, s.ref)
b.metadata = append(b.metadata, record.RefMetadata{
Ref: s.ref,
Type: record.GetMetricType(opts.Metadata.Type),
Unit: opts.Metadata.Unit,
Help: opts.Metadata.Help,
})
b.metadataSeries = append(b.metadataSeries, s)
}
}
return storage.SeriesRef(s.ref), partialErr
}
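// A sketch of the stale-marker coercion above, assuming this appender already
// buffered a native histogram for the series:
//
//	// A stale float v (value.IsStaleNaN(v) == true) is re-dispatched as
//	a.Append(storage.SeriesRef(s.ref), ls, st, t, 0, &histogram.Histogram{Sum: v}, nil, opts)
//	// so the stale marker lands in the batch type the series already uses.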
func (a *headAppenderV2) appendFloat(s *memSeries, t int64, v float64, fastRejectOOO bool) error {
s.Lock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if isOOO && fastRejectOOO {
s.Unlock()
return storage.ErrOutOfOrderSample
}
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
return err
}
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: t, V: v})
b.floatSeries = append(b.floatSeries, s)
return nil
}
func (a *headAppenderV2) appendHistogram(s *memSeries, t int64, h *histogram.Histogram, fastRejectOOO bool) error {
s.Lock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
isOOO, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if isOOO && fastRejectOOO {
s.Unlock()
return storage.ErrOutOfOrderSample
}
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
return err
}
st := stHistogram
if h.UsesCustomBuckets() {
st = stCustomBucketHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.histograms = append(b.histograms, record.RefHistogramSample{Ref: s.ref, T: t, H: h})
b.histogramSeries = append(b.histogramSeries, s)
return nil
}
func (a *headAppenderV2) appendFloatHistogram(s *memSeries, t int64, fh *histogram.FloatHistogram, fastRejectOOO bool) error {
s.Lock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
isOOO, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if isOOO && fastRejectOOO {
s.Unlock()
return storage.ErrOutOfOrderSample
}
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
return err
}
st := stFloatHistogram
if fh.UsesCustomBuckets() {
st = stCustomBucketFloatHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{Ref: s.ref, T: t, FH: fh})
b.floatHistogramSeries = append(b.floatHistogramSeries, s)
return nil
}
func (a *headAppenderV2) appendExemplars(s *memSeries, exemplars []exemplar.Exemplar) error {
var errs []error
for _, e := range exemplars {
// Ensure no empty labels have gotten through.
e.Labels = e.Labels.WithoutEmpty()
if err := a.head.exemplars.ValidateExemplar(s.labels(), e); err != nil {
if !errors.Is(err, storage.ErrDuplicateExemplar) && !errors.Is(err, storage.ErrExemplarsDisabled) {
// Except duplicates and disabled exemplar storage, return partial errors.
errs = append(errs, err)
continue
}
if !errors.Is(err, storage.ErrOutOfOrderExemplar) {
a.head.logger.Debug("Error while adding an exemplar on AppendSample", "exemplars", fmt.Sprintf("%+v", e), "err", err)
}
continue
}
b := a.getCurrentBatch(stNone, s.ref)
b.exemplars = append(b.exemplars, exemplarWithSeriesRef{storage.SeriesRef(s.ref), e})
}
if len(errs) > 0 {
return &storage.AppendPartialError{ExemplarErrors: errs}
}
return nil
}
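// Sketch of how a caller might treat the partial error, assuming ls, t, v and
// exemplars es are in scope:
//
//	ref, err := app.Append(0, ls, 0, t, v, nil, nil, storage.AOptions{Exemplars: es})
//	var pErr *storage.AppendPartialError
//	if errors.As(err, &pErr) {
//		// The sample was buffered despite the error: ref stays valid and
//		// Commit remains the right follow-up; only some exemplars were dropped.
//	}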
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
//
// ST is an experimental feature, we don't fail the append on errors, just debug log.
func (a *headAppenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.Labels, st, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) {
// NOTE: Use ls instead of s.lset to avoid locking memSeries. Using s.ref is acceptable without locking.
if st >= t {
a.head.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrSTNewerThanSample)
return
}
if st < a.minValidTime {
a.head.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrOutOfBounds)
return
}
var err error
switch {
case fh != nil:
zeroFloatHistogram := &histogram.FloatHistogram{
// The STZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
CustomValues: fh.CustomValues,
}
err = a.appendFloatHistogram(s, st, zeroFloatHistogram, true)
case h != nil:
zeroHistogram := &histogram.Histogram{
// The STZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
CustomValues: h.CustomValues,
}
err = a.appendHistogram(s, st, zeroHistogram, true)
default:
err = a.appendFloat(s, st, 0, true)
}
if err != nil {
if errors.Is(err, storage.ErrOutOfOrderSample) {
// OOO errors are common and expected (cumulative). Explicitly ignored.
return
}
a.head.logger.Debug("Error when appending ST", "series", s.lset.String(), "st", st, "t", t, "err", err)
return
}
if st > a.maxt {
a.maxt = st
}
}
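// For intuition, with EnableSTAsZeroSample a call carrying st=1000, t=1500,
// v=7 on a fresh series results in two appends:
//
//	(t=1000, v=0) // synthetic zero at the start timestamp (CounterReset hint for histograms)
//	(t=1500, v=7) // the actual sample
//
// On steady-state scrapes st usually repeats, so the zero append is rejected
// as out-of-order (fastRejectOOO=true) and silently ignored.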
var _ storage.GetRef = &headAppenderV2{}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_read.go | tsdb/head_read.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"slices"
"sync"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
)
func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
return h.exemplars.ExemplarQuerier(ctx)
}
// Index returns an IndexReader against the block.
func (h *Head) Index() (IndexReader, error) {
return h.indexRange(math.MinInt64, math.MaxInt64), nil
}
func (h *Head) indexRange(mint, maxt int64) *headIndexReader {
if hmin := h.MinTime(); hmin > mint {
mint = hmin
}
return &headIndexReader{head: h, mint: mint, maxt: maxt}
}
type headIndexReader struct {
head *Head
mint, maxt int64
}
func (*headIndexReader) Close() error {
return nil
}
func (h *headIndexReader) Symbols() index.StringIter {
return h.head.postings.Symbols()
}
// SortedLabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
values, err := h.LabelValues(ctx, name, hints, matchers...)
if err == nil {
slices.Sort(values)
}
return values, err
}
// LabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (h *headIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
return []string{}, nil
}
if len(matchers) == 0 {
return h.head.postings.LabelValues(ctx, name, hints), nil
}
return labelValuesWithMatchers(ctx, h, name, hints, matchers...)
}
// LabelNames returns all the unique label names present in the head
// that are within the time range mint to maxt.
func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
return []string{}, nil
}
if len(matchers) == 0 {
labelNames := h.head.postings.LabelNames()
slices.Sort(labelNames)
return labelNames, nil
}
return labelNamesWithMatchers(ctx, h, matchers...)
}
// Postings returns the postings list iterator for the label pairs.
func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
return h.head.postings.Postings(ctx, name, values...), nil
}
func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings {
return h.head.postings.PostingsForLabelMatching(ctx, name, match)
}
func (h *headIndexReader) PostingsForAllLabelValues(ctx context.Context, name string) index.Postings {
return h.head.postings.PostingsForAllLabelValues(ctx, name)
}
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
series := make([]*memSeries, 0, 128)
notFoundSeriesCount := 0
// Fetch all the series only once.
for p.Next() {
s := h.head.series.getByID(chunks.HeadSeriesRef(p.At()))
if s == nil {
notFoundSeriesCount++
} else {
series = append(series, s)
}
}
if notFoundSeriesCount > 0 {
h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount)
}
if err := p.Err(); err != nil {
return index.ErrPostings(fmt.Errorf("expand postings: %w", err))
}
slices.SortFunc(series, func(a, b *memSeries) int {
return labels.Compare(a.labels(), b.labels())
})
// Convert back to list.
ep := make([]storage.SeriesRef, 0, len(series))
for _, p := range series {
ep = append(ep, storage.SeriesRef(p.ref))
}
return index.NewListPostings(ep)
}
// ShardedPostings implements IndexReader. This function returns a failing postings list if sharding
// has not been enabled in the Head.
func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
if !h.head.opts.EnableSharding {
return index.ErrPostings(errors.New("sharding is disabled"))
}
out := make([]storage.SeriesRef, 0, 128)
notFoundSeriesCount := 0
for p.Next() {
s := h.head.series.getByID(chunks.HeadSeriesRef(p.At()))
if s == nil {
notFoundSeriesCount++
continue
}
// Check if the series belong to the shard.
if s.shardHash%shardCount != shardIndex {
continue
}
out = append(out, storage.SeriesRef(s.ref))
}
if notFoundSeriesCount > 0 {
h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount)
}
return index.NewListPostings(out)
}
// Series returns the series for the given reference.
// Chunks are skipped if chks is nil.
func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s := h.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
h.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
builder.Assign(s.labels())
if chks == nil {
return nil
}
s.Lock()
defer s.Unlock()
*chks = (*chks)[:0]
*chks = appendSeriesChunks(s, h.mint, h.maxt, *chks)
return nil
}
func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta {
for i, c := range s.mmappedChunks {
// Do not expose chunks that are outside of the specified range.
if !c.OverlapsClosedInterval(mint, maxt) {
continue
}
chks = append(chks, chunks.Meta{
MinTime: c.minTime,
MaxTime: c.maxTime,
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))),
})
}
if s.headChunks != nil {
var maxTime int64
var i, j int
for i = s.headChunks.len() - 1; i >= 0; i-- {
chk := s.headChunks.atOffset(i)
if i == 0 {
// Set the head chunk as open (being appended to) for the first headChunk.
maxTime = math.MaxInt64
} else {
maxTime = chk.maxTime
}
if chk.OverlapsClosedInterval(mint, maxt) {
chks = append(chks, chunks.Meta{
MinTime: chk.minTime,
MaxTime: maxTime,
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))),
})
}
j++
}
}
return chks
}
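// Note the asymmetry above: the open (still appended-to) head chunk is
// reported with MaxTime = math.MaxInt64 rather than its current maxTime, so
// readers never exclude it based on a momentarily stale maximum timestamp.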
// headChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos]
// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list.
func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.firstChunkID
}
const oooChunkIDMask = 1 << 23
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// Only the bottom 24 bits are used. Bit 23 is always 1 for an OOO chunk; for the rest:
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask
}
func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chunkID chunks.HeadChunkID, isOOO bool) {
sid, cid := chunks.HeadChunkRef(ref).Unpack()
return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0
}
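// Worked example of the encoding above (values are illustrative):
//
//	// With firstOOOChunkID = 5, the OOO chunk at slice position 2 gets:
//	cid := (chunks.HeadChunkID(2) + 5) | oooChunkIDMask // = 7 | 1<<23
//	// Unpacking: cid & (oooChunkIDMask - 1) == 7, and the set bit 23 marks the
//	// chunk as OOO; oooChunk() then subtracts firstOOOChunkID to recover position 2.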
// LabelNamesFor returns all the label names for the series referred to by the postings.
// The names returned are sorted.
func (h *headIndexReader) LabelNamesFor(ctx context.Context, series index.Postings) ([]string, error) {
namesMap := make(map[string]struct{})
i := 0
for series.Next() {
i++
if i%checkContextEveryNIterations == 0 && ctx.Err() != nil {
return nil, ctx.Err()
}
memSeries := h.head.series.getByID(chunks.HeadSeriesRef(series.At()))
if memSeries == nil {
// Series not found, this happens during compaction,
// when series was garbage collected after the caller got the series IDs.
continue
}
memSeries.labels().Range(func(lbl labels.Label) {
namesMap[lbl.Name] = struct{}{}
})
}
if err := series.Err(); err != nil {
return nil, err
}
names := make([]string, 0, len(namesMap))
for name := range namesMap {
names = append(names, name)
}
slices.Sort(names)
return names, nil
}
// Chunks returns a ChunkReader against the block.
func (h *Head) Chunks() (ChunkReader, error) {
return h.chunksRange(math.MinInt64, math.MaxInt64, h.iso.State(math.MinInt64, math.MaxInt64))
}
func (h *Head) chunksRange(mint, maxt int64, is *isolationState) (*headChunkReader, error) {
h.closedMtx.Lock()
defer h.closedMtx.Unlock()
if h.closed {
return nil, errors.New("can't read from a closed head")
}
if hmin := h.MinTime(); hmin > mint {
mint = hmin
}
return &headChunkReader{
head: h,
mint: mint,
maxt: maxt,
isoState: is,
}, nil
}
type headChunkReader struct {
head *Head
mint, maxt int64
isoState *isolationState
}
func (h *headChunkReader) Close() error {
if h.isoState != nil {
h.isoState.Close()
}
return nil
}
// ChunkOrIterable returns the chunk for the reference number.
func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
chk, _, err := h.chunk(meta, false)
return chk, nil, err
}
type ChunkReaderWithCopy interface {
ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error)
}
// ChunkOrIterableWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk, plus the max time of the chunk.
func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
chk, maxTime, err := h.chunk(meta, true)
return chk, nil, maxTime, err
}
// chunk returns the chunk for the reference number.
// If copyLastChunk is true, then it makes a copy of the head chunk if asked for it.
// Also returns max time of the chunk.
func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
sid, cid, isOOO := unpackHeadChunkRef(meta.Ref)
s := h.head.series.getByID(sid)
// This means that the series has been garbage collected.
if s == nil {
return nil, 0, storage.ErrNotFound
}
s.Lock()
defer s.Unlock()
return h.head.chunkFromSeries(s, cid, isOOO, h.mint, h.maxt, h.isoState, copyLastChunk)
}
// Dumb thing to defeat chunk pool.
type wrapOOOHeadChunk struct {
chunkenc.Chunk
}
// Call with s locked.
func (h *Head) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, isOOO bool, mint, maxt int64, isoState *isolationState, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
if isOOO {
chk, maxTime, err := s.oooChunk(cid, h.chunkDiskMapper, &h.memChunkPool)
return wrapOOOHeadChunk{chk}, maxTime, err
}
c, headChunk, isOpen, err := s.chunk(cid, h.chunkDiskMapper, &h.memChunkPool)
if err != nil {
return nil, 0, err
}
defer func() {
if !headChunk {
// Set this to nil so that Go GC can collect it after it has been used.
c.chunk = nil
c.prev = nil
h.memChunkPool.Put(c)
}
}()
// This means that the chunk is outside the specified range.
if !c.OverlapsClosedInterval(mint, maxt) {
return nil, 0, storage.ErrNotFound
}
chk, maxTime := c.chunk, c.maxTime
if headChunk && isOpen && copyLastChunk {
// The caller may ask to copy the head chunk in order to take the
// bytes of the chunk without causing the race between read and append.
b := s.headChunks.chunk.Bytes()
newB := make([]byte, len(b))
copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
// TODO(codesome): Put back in the pool (non-trivial).
chk, err = h.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB)
if err != nil {
return nil, 0, err
}
}
return &safeHeadChunk{
Chunk: chk,
s: s,
cid: cid,
isoState: isoState,
}, maxTime, nil
}
// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
// If headChunk is false, it means that the returned *memChunk
// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
// if isOpen is true, it means that the returned *memChunk is used for appends.
func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk, isOpen bool, err error) {
// ix represents the index of the chunk in the s.mmappedChunks slice. The chunk IDs are
// incremented by 1 when a new chunk is created, hence (id - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
// is >= len(s.mmappedChunks), it represents one of the chunks on s.headChunks linked list.
// The order of elements is different for slice and linked list.
// For s.mmappedChunks slice newer chunks are appended to it.
// For s.headChunks list newer chunks are prepended to it.
//
// memSeries {
// mmappedChunks: [t0, t1, t2]
// headChunk: {t5}->{t4}->{t3}
// }
ix := int(id) - int(s.firstChunkID)
var headChunksLen int
if s.headChunks != nil {
headChunksLen = s.headChunks.len()
}
if ix < 0 || ix > len(s.mmappedChunks)+headChunksLen-1 {
return nil, false, false, storage.ErrNotFound
}
if ix < len(s.mmappedChunks) {
chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref)
if err != nil {
var cerr *chunks.CorruptionErr
if errors.As(err, &cerr) {
panic(err)
}
return nil, false, false, err
}
mc := memChunkPool.Get().(*memChunk)
mc.chunk = chk
mc.minTime = s.mmappedChunks[ix].minTime
mc.maxTime = s.mmappedChunks[ix].maxTime
return mc, false, false, nil
}
ix -= len(s.mmappedChunks)
offset := headChunksLen - ix - 1
// headChunks is a linked list where first element is the most recent one and the last one is the oldest.
// This order is reversed when compared with mmappedChunks, since mmappedChunks[0] is the oldest chunk,
// while headChunk.atOffset(0) would give us the most recent chunk.
// So when calling headChunk.atOffset() we need to reverse the value of ix.
elem := s.headChunks.atOffset(offset)
if elem == nil {
// This should never really happen and would mean that headChunksLen value is NOT equal
// to the length of the headChunks list.
return nil, false, false, storage.ErrNotFound
}
return elem, true, offset == 0, nil
}
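// Worked example for the layout documented above, assuming firstChunkID = 0:
//
//	mmappedChunks: [t0, t1, t2]     // ids 0, 1, 2
//	headChunks:    {t5}->{t4}->{t3} // ids 5, 4, 3; atOffset(0) is the newest, {t5}
//
// Looking up id 4: ix = 4 is past the m-mapped chunks, so ix -= 3 giving 1;
// offset = headChunksLen - ix - 1 = 3 - 1 - 1 = 1, and atOffset(1) yields {t4}.
// Only offset == 0 (the open chunk {t5}) reports isOpen.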
// oooChunk returns the chunk for the HeadChunkID by m-mapping it from the disk.
// It never returns the head OOO chunk.
func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, _ *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) {
// ix represents the index of the chunk in the s.ooo.oooMmappedChunks slice. The chunk IDs are
// incremented by 1 when a new chunk is created, hence (id - firstOOOChunkID) gives the slice index.
ix := int(id) - int(s.ooo.firstOOOChunkID)
if ix < 0 || ix >= len(s.ooo.oooMmappedChunks) {
return nil, 0, storage.ErrNotFound
}
chk, err := chunkDiskMapper.Chunk(s.ooo.oooMmappedChunks[ix].ref)
return chk, s.ooo.oooMmappedChunks[ix].maxTime, err
}
// safeHeadChunk makes sure that the chunk can be accessed without a race condition.
type safeHeadChunk struct {
chunkenc.Chunk
s *memSeries
cid chunks.HeadChunkID
isoState *isolationState
}
func (c *safeHeadChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
c.s.Lock()
it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter)
c.s.Unlock()
return it
}
// iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range.
// It is unsafe to call this concurrently with s.append(...) without holding the series lock.
func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *isolationState, it chunkenc.Iterator) chunkenc.Iterator {
ix := int(id) - int(s.firstChunkID)
numSamples := c.NumSamples()
stopAfter := numSamples
if isoState != nil && !isoState.IsolationDisabled() {
totalSamples := 0 // Total samples in this series.
previousSamples := 0 // Samples before this chunk.
for j, d := range s.mmappedChunks {
totalSamples += int(d.numSamples)
if j < ix {
previousSamples += int(d.numSamples)
}
}
ix -= len(s.mmappedChunks)
if s.headChunks != nil {
// Iterate all head chunks from the oldest to the newest.
headChunksLen := s.headChunks.len()
for j := headChunksLen - 1; j >= 0; j-- {
chk := s.headChunks.atOffset(j)
chkSamples := chk.chunk.NumSamples()
totalSamples += chkSamples
// Chunk ID is len(s.mmappedChunks) + $(headChunks list position).
// Where $(headChunks list position) is zero for the oldest chunk and $(s.headChunks.len() - 1)
// for the newest (open) chunk.
if headChunksLen-1-j < ix {
previousSamples += chkSamples
}
}
}
// Remove from the total the transactionIDs that are relevant only for
// samples that come after this chunk.
appendIDsToConsider := int(s.txs.txIDCount) - (totalSamples - (previousSamples + numSamples))
// Iterate over the appendIDs, find the first one that the isolation state says not
// to return.
it := s.txs.iterator()
for index := range appendIDsToConsider {
appendID := it.At()
if appendID <= isoState.maxAppendID { // Easy check first.
if _, ok := isoState.incompleteAppends[appendID]; !ok {
it.Next()
continue
}
}
// Stopped in a previous chunk.
stopAfter = max(numSamples-(appendIDsToConsider-index), 0)
break
}
}
if stopAfter == 0 {
return chunkenc.NewNopIterator()
}
if stopAfter == numSamples {
return c.Iterator(it)
}
return makeStopIterator(c, it, stopAfter)
}
// stopIterator wraps an Iterator, but only returns the first
// stopAfter values, if initialized with i=-1.
type stopIterator struct {
chunkenc.Iterator
i, stopAfter int
}
func (it *stopIterator) Next() chunkenc.ValueType {
if it.i+1 >= it.stopAfter {
return chunkenc.ValNone
}
it.i++
return it.Iterator.Next()
}
func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator {
// Re-use the Iterator object if it is a stopIterator.
if stopIter, ok := it.(*stopIterator); ok {
stopIter.Iterator = c.Iterator(stopIter.Iterator)
stopIter.i = -1
stopIter.stopAfter = stopAfter
return stopIter
}
return &stopIterator{
Iterator: c.Iterator(it),
i: -1,
stopAfter: stopAfter,
}
}
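// Minimal usage sketch (callers normally reach this through memSeries.iterator):
//
//	it := makeStopIterator(c, nil, 2)
//	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
//		// Observes at most the first 2 samples of c; isolation uses this to
//		// hide samples appended by transactions that are not yet visible.
//	}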
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/ooo_head_test.go | tsdb/ooo_head_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"math"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
const testMaxSize int = 32
// Formulas chosen to make testing easy.
func valEven(pos int) int64 { return int64(pos*2 + 2) } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values
func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will be inserted at the chosen position because they sort before the pre-existing vals.
func makeEvenSampleSlice(n int, sampleFunc func(ts int64) sample) []sample {
s := make([]sample, n)
for i := range n {
s[i] = sampleFunc(valEven(i))
}
return s
}
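// For instance, with numPreExisting = 3 the pre-existing timestamps are
// valEven(0..2) = 2, 4, 6, while valOdd(insertPos) for insertPos = 0..3 yields
// 1, 3, 5, 7, landing before, between, and after the existing samples, which is
// exactly what testOOOInsert below relies on.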
// TestOOOInsert tests the following cases:
// - Number of pre-existing samples anywhere from 0 to testMaxSize.
// - Insert new sample before first pre-existing samples, after the last, and anywhere in between.
// - With a chunk initial capacity of testMaxSize/8 and testMaxSize, which lets us test non-full and full chunks, and chunks that need to expand themselves.
func TestOOOInsert(t *testing.T) {
scenarios := map[string]struct {
sampleFunc func(ts int64) sample
}{
"float": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, f: float64(ts)}
},
},
"integer histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"float histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
testOOOInsert(t, scenario.sampleFunc)
})
}
}
func testOOOInsert(t *testing.T,
sampleFunc func(ts int64) sample,
) {
for numPreExisting := 0; numPreExisting <= testMaxSize; numPreExisting++ {
// For example, if we have numPreExisting 2, then:
// chunk.samples indexes filled 0 1
// chunk.samples with these values 2 4 // valEven
// we want to test inserting at index 0 1 2 // insertPos=0..numPreExisting
// we can do this by using values 1, 3 5 // valOdd(insertPos)
for insertPos := 0; insertPos <= numPreExisting; insertPos++ {
chunk := NewOOOChunk()
chunk.samples = makeEvenSampleSlice(numPreExisting, sampleFunc)
newSample := sampleFunc(valOdd(insertPos))
chunk.Insert(newSample.t, newSample.f, newSample.h, newSample.fh)
var expSamples []sample
// Our expected new samples slice will first contain the original samples.
for i := 0; i < insertPos; i++ {
expSamples = append(expSamples, sampleFunc(valEven(i)))
}
// Then the new sample.
expSamples = append(expSamples, newSample)
// Followed by any original samples that were pushed back by the new one.
for i := insertPos; i < numPreExisting; i++ {
expSamples = append(expSamples, sampleFunc(valEven(i)))
}
require.Equal(t, expSamples, chunk.samples, "numPreExisting %d, insertPos %d", numPreExisting, insertPos)
}
}
}
// TestOOOInsertDuplicate tests the correct behavior when inserting a sample that is a duplicate of any
// pre-existing samples, with between 1 and testMaxSize pre-existing samples and
// with a chunk initial capacity of testMaxSize/8 and testMaxSize, which lets us test non-full and full chunks, and chunks that need to expand themselves.
func TestOOOInsertDuplicate(t *testing.T) {
scenarios := map[string]struct {
sampleFunc func(ts int64) sample
}{
"float": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, f: float64(ts)}
},
},
"integer histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"float histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
testOOOInsertDuplicate(t, scenario.sampleFunc)
})
}
}
func testOOOInsertDuplicate(t *testing.T,
sampleFunc func(ts int64) sample,
) {
for num := 1; num <= testMaxSize; num++ {
for dupPos := 0; dupPos < num; dupPos++ {
chunk := NewOOOChunk()
chunk.samples = makeEvenSampleSlice(num, sampleFunc)
dupSample := chunk.samples[dupPos]
dupSample.f = 0.123
ok := chunk.Insert(dupSample.t, dupSample.f, dupSample.h, dupSample.fh)
expSamples := makeEvenSampleSlice(num, sampleFunc) // We expect no change.
require.False(t, ok)
require.Equal(t, expSamples, chunk.samples, "num %d, dupPos %d", num, dupPos)
}
}
}
type chunkVerify struct {
encoding chunkenc.Encoding
minTime int64
maxTime int64
}
func TestOOOChunks_ToEncodedChunks(t *testing.T) {
h1 := tsdbutil.GenerateTestHistogram(1)
// Make h2 appendable but with more buckets, to trigger recoding.
h2 := h1.Copy()
h2.PositiveSpans = append(h2.PositiveSpans, histogram.Span{Offset: 1, Length: 1})
h2.PositiveBuckets = append(h2.PositiveBuckets, 12)
h2explicit := h2.Copy()
h2explicit.CounterResetHint = histogram.CounterReset
testCases := map[string]struct {
samples []sample
expectedCounterResets []histogram.CounterResetHint
expectedChunks []chunkVerify
}{
"empty": {
samples: []sample{},
},
"has floats": {
samples: []sample{
{t: 1000, f: 43.0},
{t: 1100, f: 42.0},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1100},
},
},
"mix of floats and histograms": {
samples: []sample{
{t: 1000, f: 43.0},
{t: 1100, h: h1},
{t: 1200, f: 42.0},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
{encoding: chunkenc.EncXOR, minTime: 1200, maxTime: 1200},
},
},
"has an implicit counter reset": {
samples: []sample{
{t: 1000, h: h2},
{t: 1100, h: h1},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
},
},
"has an explicit counter reset": {
samples: []sample{
{t: 1100, h: h2explicit},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
},
},
"has an explicit counter reset inside": {
samples: []sample{
{t: 1000, h: h1},
{t: 1100, h: h2explicit},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
},
},
"has a recoded histogram": { // Regression test for wrong minT, maxT in histogram recoding.
samples: []sample{
{t: 0, h: h1},
{t: 1, h: h2},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 0, maxTime: 1},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// Sanity check.
require.Len(t, tc.expectedCounterResets, len(tc.samples), "number of samples and counter resets")
oooChunk := OOOChunk{}
for _, s := range tc.samples {
switch s.Type() {
case chunkenc.ValFloat:
oooChunk.Insert(s.t, s.f, nil, nil)
case chunkenc.ValHistogram:
oooChunk.Insert(s.t, 0, s.h.Copy(), nil)
case chunkenc.ValFloatHistogram:
oooChunk.Insert(s.t, 0, nil, s.fh.Copy())
default:
t.Fatalf("unexpected sample type %d", s.Type())
}
}
chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
require.Len(t, chunks, len(tc.expectedChunks), "number of chunks")
sampleIndex := 0
for i, c := range chunks {
require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i)
require.Equal(t, tc.expectedChunks[i].minTime, c.minTime, "chunk %d minTime", i)
require.Equal(t, tc.expectedChunks[i].maxTime, c.maxTime, "chunk %d maxTime", i)
samples, err := storage.ExpandSamples(c.chunk.Iterator(nil), newSample)
require.GreaterOrEqual(t, len(tc.samples)-sampleIndex, len(samples), "too many samples in chunk %d, expected at most %d", i, len(tc.samples)-sampleIndex)
require.NoError(t, err)
if len(samples) == 0 {
// Ignore empty chunks.
continue
}
switch c.chunk.Encoding() {
case chunkenc.EncXOR:
for j, s := range samples {
require.Equal(t, chunkenc.ValFloat, s.Type())
// XOR chunks don't have counter reset hints, so we shouldn't expect anything other than UnknownCounterReset.
require.Equal(t, histogram.UnknownCounterReset, tc.expectedCounterResets[sampleIndex+j], "sample reset hint %d", sampleIndex+j)
require.Equal(t, tc.samples[sampleIndex+j].f, s.F(), "sample %d", sampleIndex+j)
}
case chunkenc.EncHistogram:
for j, s := range samples {
require.Equal(t, chunkenc.ValHistogram, s.Type())
require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.H().CounterResetHint, "sample reset hint %d", sampleIndex+j)
compareTo := tc.samples[sampleIndex+j].h.Copy()
compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j]
require.Equal(t, compareTo, s.H().Compact(0), "sample %d", sampleIndex+j)
}
case chunkenc.EncFloatHistogram:
for j, s := range samples {
require.Equal(t, chunkenc.ValFloatHistogram, s.Type())
require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.FH().CounterResetHint, "sample reset hint %d", sampleIndex+j)
compareTo := tc.samples[sampleIndex+j].fh.Copy()
compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j]
require.Equal(t, compareTo, s.FH().Compact(0), "sample %d", sampleIndex+j)
}
}
sampleIndex += len(samples)
}
require.Equal(t, len(tc.samples), sampleIndex, "number of samples")
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head.go | tsdb/head.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"math"
"path/filepath"
"runtime"
"strconv"
"sync"
"time"
"github.com/oklog/ulid/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/zeropool"
)
var (
// ErrInvalidSample is returned if an appended sample is not valid and can't
// be ingested.
ErrInvalidSample = errors.New("invalid sample")
// ErrInvalidExemplar is returned if an appended exemplar is not valid and can't
// be ingested.
ErrInvalidExemplar = errors.New("invalid exemplar")
// ErrAppenderClosed is returned if an appender has already been successfully
// rolled back or committed.
ErrAppenderClosed = errors.New("appender closed")
// defaultIsolationDisabled is true if isolation is disabled by default.
defaultIsolationDisabled = false
defaultWALReplayConcurrency = runtime.GOMAXPROCS(0)
)
// Head handles reads and writes of time series data within a time window.
type Head struct {
chunkRange atomic.Int64
numSeries atomic.Uint64
numStaleSeries atomic.Uint64
minOOOTime, maxOOOTime atomic.Int64 // TODO(jesusvazquez) These should be updated after garbage collection.
minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. TODO(jesusvazquez) Ensure these are properly tracked.
minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block.
lastWALTruncationTime atomic.Int64
lastMemoryTruncationTime atomic.Int64
lastSeriesID atomic.Uint64
// All the ooo m-map chunks should be after this. This is used to truncate old ooo m-map chunks.
// This should be typecasted to chunks.ChunkDiskMapperRef after loading.
minOOOMmapRef atomic.Uint64
metrics *headMetrics
opts *HeadOptions
wal, wbl *wlog.WL
exemplarMetrics *ExemplarMetrics
exemplars ExemplarStorage
logger *slog.Logger
refSeriesPool zeropool.Pool[[]record.RefSeries]
floatsPool zeropool.Pool[[]record.RefSample]
exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef]
histogramsPool zeropool.Pool[[]record.RefHistogramSample]
floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
metadataPool zeropool.Pool[[]record.RefMetadata]
seriesPool zeropool.Pool[[]*memSeries]
typeMapPool zeropool.Pool[map[chunks.HeadSeriesRef]sampleType]
bytesPool zeropool.Pool[[]byte]
memChunkPool sync.Pool
// These pools are only used during WAL/WBL replay and are reset at the end.
// NOTE: Adjust resetWLReplayResources() upon changes to the pools.
wlReplaySeriesPool zeropool.Pool[[]record.RefSeries]
wlReplaySamplesPool zeropool.Pool[[]record.RefSample]
wlReplaytStonesPool zeropool.Pool[[]tombstones.Stone]
wlReplayExemplarsPool zeropool.Pool[[]record.RefExemplar]
wlReplayHistogramsPool zeropool.Pool[[]record.RefHistogramSample]
wlReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
wlReplayMetadataPool zeropool.Pool[[]record.RefMetadata]
wlReplayMmapMarkersPool zeropool.Pool[[]record.RefMmapMarker]
// All series addressable by their ID or hash.
series *stripeSeries
walExpiriesMtx sync.Mutex
walExpiries map[chunks.HeadSeriesRef]int64 // Series no longer in the head, and what time they must be kept until.
// TODO(codesome): Extend MemPostings to return only OOOPostings, Set OOOStatus, ... Like an additional map of ooo postings.
postings *index.MemPostings // Postings lists for terms.
tombstones *tombstones.MemTombstones
iso *isolation
oooIso *oooIsolation
cardinalityMutex sync.Mutex
cardinalityCache *index.PostingsStats // Posting stats cache which will expire after 30sec.
cardinalityCacheKey string
lastPostingsStatsCall time.Duration // Last posting stats call (PostingsCardinalityStats()) time for caching.
// chunkDiskMapper is used to write and read Head chunks to/from disk.
chunkDiskMapper *chunks.ChunkDiskMapper
chunkSnapshotMtx sync.Mutex
closedMtx sync.Mutex
closed bool
stats *HeadStats
reg prometheus.Registerer
writeNotified wlog.WriteNotified
memTruncationInProcess atomic.Bool
memTruncationCallBack func() // For testing purposes.
}
type ExemplarStorage interface {
storage.ExemplarQueryable
AddExemplar(labels.Labels, exemplar.Exemplar) error
ValidateExemplar(labels.Labels, exemplar.Exemplar) error
IterateExemplars(f func(seriesLabels labels.Labels, e exemplar.Exemplar) error) error
}
// HeadOptions are parameters for the Head block.
type HeadOptions struct {
// Runtime reloadable option. At the top of the struct for 32 bit OS:
// https://pkg.go.dev/sync/atomic#pkg-note-BUG
MaxExemplars atomic.Int64
OutOfOrderTimeWindow atomic.Int64
OutOfOrderCapMax atomic.Int64
ChunkRange int64
// ChunkDirRoot is the parent directory of the chunks directory.
ChunkDirRoot string
ChunkPool chunkenc.Pool
ChunkWriteBufferSize int
ChunkWriteQueueSize int
SamplesPerChunk int
// StripeSize sets the number of entries in the hash map, it must be a power of 2.
// A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
// A smaller StripeSize reduces the memory allocated, but can decrease performance with a large number of series.
StripeSize int
SeriesCallback SeriesLifecycleCallback
EnableExemplarStorage bool
EnableMemorySnapshotOnShutdown bool
IsolationDisabled bool
// Maximum number of CPUs that can simultaneously process WAL replay.
// The default value is GOMAXPROCS.
// If it is set to a negative value or zero, the default value is used.
WALReplayConcurrency int
// EnableSharding enables ShardedPostings() support in the Head.
EnableSharding bool
// EnableSTAsZeroSample represents 'created-timestamp-zero-ingestion' feature flag.
// If true, ST, if non-zero and earlier than the sample timestamp, will be stored
// as a zero sample before the actual sample.
//
// The zero sample is best-effort; on failure only a debug log is emitted.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableSTAsZeroSample bool
// EnableMetadataWALRecords represents 'metadata-wal-records' feature flag.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableMetadataWALRecords bool
}
const (
// DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk.
DefaultOutOfOrderCapMax int64 = 32
// DefaultSamplesPerChunk provides a default target number of samples per chunk.
DefaultSamplesPerChunk = 120
)
func DefaultHeadOptions() *HeadOptions {
ho := &HeadOptions{
ChunkRange: DefaultBlockDuration,
ChunkDirRoot: "",
ChunkPool: chunkenc.NewPool(),
ChunkWriteBufferSize: chunks.DefaultWriteBufferSize,
ChunkWriteQueueSize: chunks.DefaultWriteQueueSize,
SamplesPerChunk: DefaultSamplesPerChunk,
StripeSize: DefaultStripeSize,
SeriesCallback: &noopSeriesLifecycleCallback{},
IsolationDisabled: defaultIsolationDisabled,
WALReplayConcurrency: defaultWALReplayConcurrency,
}
ho.OutOfOrderCapMax.Store(DefaultOutOfOrderCapMax)
return ho
}
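// Minimal construction sketch (a non-authoritative example; assumes dir is a
// writable directory, runs without WAL/WBL, and elides error handling):
//
//	opts := DefaultHeadOptions()
//	opts.ChunkDirRoot = dir
//	h, err := NewHead(prometheus.NewRegistry(), nil, nil, nil, opts, nil)
//	if err != nil {
//		panic(err)
//	}
//	defer h.Close()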
// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
// All the callbacks should be safe to be called concurrently.
// It is up to the user to implement soft or hard consistency by making the callbacks
// atomic or non-atomic. Atomic callbacks can cause performance degradation.
type SeriesLifecycleCallback interface {
// PreCreation is called before creating a series to indicate if the series can be created.
// A non-nil error means the series should not be created.
PreCreation(labels.Labels) error
// PostCreation is called after creating a series to indicate a creation of series.
PostCreation(labels.Labels)
// PostDeletion is called after deletion of series.
PostDeletion(map[chunks.HeadSeriesRef]labels.Labels)
}
// NewHead opens the head block in dir.
func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) {
var err error
if l == nil {
l = promslog.NewNopLogger()
}
if opts.OutOfOrderTimeWindow.Load() < 0 {
opts.OutOfOrderTimeWindow.Store(0)
}
// The time window can be set at runtime, so capMax must be valid
// even if OOO is not enabled yet.
capMax := opts.OutOfOrderCapMax.Load()
if capMax <= 0 || capMax > 255 {
return nil, fmt.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax)
}
if opts.ChunkRange < 1 {
return nil, fmt.Errorf("invalid chunk range %d", opts.ChunkRange)
}
if opts.SeriesCallback == nil {
opts.SeriesCallback = &noopSeriesLifecycleCallback{}
}
if stats == nil {
stats = NewHeadStats()
}
if !opts.EnableExemplarStorage {
opts.MaxExemplars.Store(0)
}
h := &Head{
wal: wal,
wbl: wbl,
logger: l,
opts: opts,
memChunkPool: sync.Pool{
New: func() any {
return &memChunk{}
},
},
stats: stats,
reg: r,
}
if err := h.resetInMemoryState(); err != nil {
return nil, err
}
if opts.ChunkPool == nil {
opts.ChunkPool = chunkenc.NewPool()
}
if opts.WALReplayConcurrency <= 0 {
opts.WALReplayConcurrency = defaultWALReplayConcurrency
}
h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(
r,
mmappedChunksDir(opts.ChunkDirRoot),
opts.ChunkPool,
opts.ChunkWriteBufferSize,
opts.ChunkWriteQueueSize,
)
if err != nil {
return nil, err
}
h.metrics = newHeadMetrics(h, r)
return h, nil
}
func (h *Head) resetInMemoryState() error {
var err error
var em *ExemplarMetrics
if h.exemplars != nil {
ce, ok := h.exemplars.(*CircularExemplarStorage)
if ok {
em = ce.metrics
}
}
if em == nil {
em = NewExemplarMetrics(h.reg)
}
es, err := NewCircularExemplarStorage(h.opts.MaxExemplars.Load(), em)
if err != nil {
return err
}
if h.series != nil {
// Reset the existing series to make sure we call the appropriate hooks
// and increment the series-removed metrics.
fs := h.series.iterForDeletion(func(_ int, _ uint64, s *memSeries, flushedForCallback map[chunks.HeadSeriesRef]labels.Labels) {
// All series should be flushed
flushedForCallback[s.ref] = s.lset
})
h.metrics.seriesRemoved.Add(float64(fs))
}
h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
h.iso = newIsolation(h.opts.IsolationDisabled)
h.oooIso = newOOOIsolation()
h.numSeries.Store(0)
h.exemplarMetrics = em
h.exemplars = es
h.postings = index.NewUnorderedMemPostings()
h.tombstones = tombstones.NewMemTombstones()
h.walExpiries = map[chunks.HeadSeriesRef]int64{}
h.chunkRange.Store(h.opts.ChunkRange)
h.minTime.Store(math.MaxInt64)
h.maxTime.Store(math.MinInt64)
h.minOOOTime.Store(math.MaxInt64)
h.maxOOOTime.Store(math.MinInt64)
h.lastWALTruncationTime.Store(math.MinInt64)
h.lastMemoryTruncationTime.Store(math.MinInt64)
return nil
}
func (h *Head) resetWLReplayResources() {
h.wlReplaySeriesPool = zeropool.Pool[[]record.RefSeries]{}
h.wlReplaySamplesPool = zeropool.Pool[[]record.RefSample]{}
h.wlReplaytStonesPool = zeropool.Pool[[]tombstones.Stone]{}
h.wlReplayExemplarsPool = zeropool.Pool[[]record.RefExemplar]{}
h.wlReplayHistogramsPool = zeropool.Pool[[]record.RefHistogramSample]{}
h.wlReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{}
h.wlReplayMetadataPool = zeropool.Pool[[]record.RefMetadata]{}
h.wlReplayMmapMarkersPool = zeropool.Pool[[]record.RefMmapMarker]{}
}
type headMetrics struct {
activeAppenders prometheus.Gauge
series prometheus.GaugeFunc
staleSeries prometheus.GaugeFunc
seriesCreated prometheus.Counter
seriesRemoved prometheus.Counter
seriesNotFound prometheus.Counter
chunks prometheus.Gauge
chunksCreated prometheus.Counter
chunksRemoved prometheus.Counter
gcDuration prometheus.Summary
samplesAppended *prometheus.CounterVec
outOfOrderSamplesAppended *prometheus.CounterVec
outOfBoundSamples *prometheus.CounterVec
outOfOrderSamples *prometheus.CounterVec
tooOldSamples *prometheus.CounterVec
walTruncateDuration prometheus.Summary
walCorruptionsTotal prometheus.Counter
dataTotalReplayDuration prometheus.Gauge
headTruncateFail prometheus.Counter
headTruncateTotal prometheus.Counter
checkpointDeleteFail prometheus.Counter
checkpointDeleteTotal prometheus.Counter
checkpointCreationFail prometheus.Counter
checkpointCreationTotal prometheus.Counter
mmapChunkCorruptionTotal prometheus.Counter
snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1.
oooHistogram prometheus.Histogram
mmapChunksTotal prometheus.Counter
walReplayUnknownRefsTotal *prometheus.CounterVec
wblReplayUnknownRefsTotal *prometheus.CounterVec
}
const (
sampleMetricTypeFloat = "float"
sampleMetricTypeHistogram = "histogram"
)
func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
m := &headMetrics{
activeAppenders: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_active_appenders",
Help: "Number of currently active appender transactions",
}),
series: prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_series",
Help: "Total number of series in the head block.",
}, func() float64 {
return float64(h.NumSeries())
}),
staleSeries: prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_stale_series",
Help: "Total number of stale series in the head block.",
}, func() float64 {
return float64(h.NumStaleSeries())
}),
seriesCreated: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_series_created_total",
Help: "Total number of series created in the head",
}),
seriesRemoved: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_series_removed_total",
Help: "Total number of series removed in the head",
}),
seriesNotFound: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_series_not_found_total",
Help: "Total number of requests for series that were not found.",
}),
chunks: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_chunks",
Help: "Total number of chunks in the head block.",
}),
chunksCreated: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_chunks_created_total",
Help: "Total number of chunks created in the head",
}),
chunksRemoved: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_chunks_removed_total",
Help: "Total number of chunks removed in the head",
}),
gcDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Name: "prometheus_tsdb_head_gc_duration_seconds",
Help: "Runtime of garbage collection in the head block.",
}),
walTruncateDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Name: "prometheus_tsdb_wal_truncate_duration_seconds",
Help: "Duration of WAL truncation.",
}),
walCorruptionsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_corruptions_total",
Help: "Total number of WAL corruptions.",
}),
dataTotalReplayDuration: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_data_replay_duration_seconds",
Help: "Time taken to replay the data on disk.",
}),
samplesAppended: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_samples_appended_total",
Help: "Total number of appended samples.",
}, []string{"type"}),
outOfOrderSamplesAppended: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_out_of_order_samples_appended_total",
Help: "Total number of appended out of order samples.",
}, []string{"type"}),
outOfBoundSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_out_of_bound_samples_total",
Help: "Total number of out of bound samples ingestion failed attempts with out of order support disabled.",
}, []string{"type"}),
outOfOrderSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_out_of_order_samples_total",
Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.",
}, []string{"type"}),
tooOldSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_too_old_samples_total",
Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window.",
}, []string{"type"}),
headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_truncations_failed_total",
Help: "Total number of head truncations that failed.",
}),
headTruncateTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_truncations_total",
Help: "Total number of head truncations attempted.",
}),
checkpointDeleteFail: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_checkpoint_deletions_failed_total",
Help: "Total number of checkpoint deletions that failed.",
}),
checkpointDeleteTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_checkpoint_deletions_total",
Help: "Total number of checkpoint deletions attempted.",
}),
checkpointCreationFail: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_checkpoint_creations_failed_total",
Help: "Total number of checkpoint creations that failed.",
}),
checkpointCreationTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_checkpoint_creations_total",
Help: "Total number of checkpoint creations attempted.",
}),
mmapChunkCorruptionTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_mmap_chunk_corruptions_total",
Help: "Total number of memory-mapped chunk corruptions.",
}),
snapshotReplayErrorTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_snapshot_replay_error_total",
Help: "Total number snapshot replays that failed.",
}),
oooHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "prometheus_tsdb_sample_ooo_delta",
Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO time window and whether sample is accepted or not).",
Buckets: []float64{
60 * 10, // 10 min
60 * 30, // 30 min
60 * 60, // 60 min
60 * 60 * 2, // 2h
60 * 60 * 3, // 3h
60 * 60 * 6, // 6h
60 * 60 * 12, // 12h
},
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
}),
mmapChunksTotal: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_mmap_chunks_total",
Help: "Total number of chunks that were memory-mapped.",
}),
walReplayUnknownRefsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_replay_unknown_refs_total",
Help: "Total number of unknown series references encountered during WAL replay.",
}, []string{"type"}),
wblReplayUnknownRefsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_wbl_replay_unknown_refs_total",
Help: "Total number of unknown series references encountered during WBL replay.",
}, []string{"type"}),
}
if r != nil {
r.MustRegister(
m.activeAppenders,
m.series,
m.staleSeries,
m.chunks,
m.chunksCreated,
m.chunksRemoved,
m.seriesCreated,
m.seriesRemoved,
m.seriesNotFound,
m.gcDuration,
m.walTruncateDuration,
m.walCorruptionsTotal,
m.dataTotalReplayDuration,
m.samplesAppended,
m.outOfOrderSamplesAppended,
m.outOfBoundSamples,
m.outOfOrderSamples,
m.tooOldSamples,
m.headTruncateFail,
m.headTruncateTotal,
m.checkpointDeleteFail,
m.checkpointDeleteTotal,
m.checkpointCreationFail,
m.checkpointCreationTotal,
m.oooHistogram,
m.mmapChunksTotal,
m.mmapChunkCorruptionTotal,
m.snapshotReplayErrorTotal,
// Metrics bound to functions and not needed in tests
// can be created and registered on the spot.
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_max_time",
Help: "Maximum timestamp of the head block. The unit is decided by the library consumer.",
}, func() float64 {
return float64(h.MaxTime())
}),
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_min_time",
Help: "Minimum time bound of the head block. The unit is decided by the library consumer.",
}, func() float64 {
return float64(h.MinTime())
}),
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_isolation_low_watermark",
Help: "The lowest TSDB append ID that is still referenced.",
}, func() float64 {
return float64(h.iso.lowWatermark())
}),
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_isolation_high_watermark",
Help: "The highest TSDB append ID that has been given out.",
}, func() float64 {
return float64(h.iso.lastAppendID())
}),
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_chunks_storage_size_bytes",
Help: "Size of the chunks_head directory.",
}, func() float64 {
val, err := h.chunkDiskMapper.Size()
if err != nil {
h.logger.Error("Failed to calculate size of \"chunks_head\" dir",
"err", err.Error())
}
return float64(val)
}),
m.walReplayUnknownRefsTotal,
m.wblReplayUnknownRefsTotal,
)
}
return m
}
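// A minimal sketch (not from the upstream file) of the GaugeFunc pattern used
// throughout newHeadMetrics: the metric value is computed from live state at
// scrape time rather than being updated on every change. The registry and the
// live variable are stand-ins; prometheus.NewGaugeFunc and MustRegister are
// the real client_golang API, and atomic.Int64 works with either sync/atomic
// or the go.uber.org/atomic package used elsewhere in this package.
//
//	var live atomic.Int64 // stand-in for state such as h.NumSeries()
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{
//		Name: "example_live_series",
//		Help: "Current number of series, computed at scrape time.",
//	}, func() float64 { return float64(live.Load()) }))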
func mmappedChunksDir(dir string) string { return filepath.Join(dir, "chunks_head") }
// HeadStats are the statistics for the head component of the DB.
type HeadStats struct {
WALReplayStatus *WALReplayStatus
}
// NewHeadStats returns a new HeadStats object.
func NewHeadStats() *HeadStats {
return &HeadStats{
WALReplayStatus: &WALReplayStatus{},
}
}
// WALReplayStatus contains status information about the WAL replay.
type WALReplayStatus struct {
sync.RWMutex
Min int
Max int
Current int
}
// GetWALReplayStatus returns the WAL replay status information.
func (s *WALReplayStatus) GetWALReplayStatus() WALReplayStatus {
s.RLock()
defer s.RUnlock()
return WALReplayStatus{
Min: s.Min,
Max: s.Max,
Current: s.Current,
}
}
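// A minimal sketch (not from the upstream file) of consuming the status: the
// method returns a copy taken under the read lock, so callers get a consistent
// snapshot even while replay goroutines update the fields. The report function
// is a hypothetical caller.
//
//	func report(s *WALReplayStatus) string {
//		st := s.GetWALReplayStatus() // consistent copy taken under RLock
//		return fmt.Sprintf("replaying segment %d of [%d, %d]", st.Current, st.Min, st.Max)
//	}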
const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
// Init loads data from the write ahead log and prepares the head for writes.
// It should be called before using an appender so that it
// limits the ingested samples to the head min valid time.
func (h *Head) Init(minValidTime int64) error {
h.minValidTime.Store(minValidTime)
defer h.resetWLReplayResources()
defer func() {
h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
}()
defer h.gc() // After loading the wal remove the obsolete data from the head.
defer func() {
// Loading of m-mapped chunks and the snapshot can make the mint of the Head
// go below minValidTime.
if h.MinTime() < h.minValidTime.Load() {
h.minTime.Store(h.minValidTime.Load())
}
}()
h.logger.Info("Replaying on-disk memory mappable chunks if any")
start := time.Now()
snapIdx, snapOffset := -1, 0
refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
snapshotLoaded := false
var chunkSnapshotLoadDuration time.Duration
if h.opts.EnableMemorySnapshotOnShutdown {
h.logger.Info("Chunk snapshot is enabled, replaying from the snapshot")
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer
// than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed
// to be outdated.
loadSnapshot := true
if h.wal != nil {
_, endAt, err := wlog.Segments(h.wal.Dir())
if err != nil {
return fmt.Errorf("finding WAL segments: %w", err)
}
_, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot)
if err != nil && !errors.Is(err, record.ErrNotFound) {
h.logger.Error("Could not find last snapshot", "err", err)
}
if err == nil && endAt < idx {
loadSnapshot = false
h.logger.Warn("Last WAL file is behind snapshot, removing snapshots")
if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil {
h.logger.Error("Error while deleting snapshot directories", "err", err)
}
}
}
if loadSnapshot {
var err error
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil {
snapshotLoaded = true
chunkSnapshotLoadDuration = time.Since(start)
h.logger.Info("Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String())
} else {
snapIdx, snapOffset = -1, 0
refSeries = make(map[chunks.HeadSeriesRef]*memSeries)
h.metrics.snapshotReplayErrorTotal.Inc()
h.logger.Error("Failed to load chunk snapshot", "err", err)
// We clear the partially loaded data to replay fresh from the WAL.
if err := h.resetInMemoryState(); err != nil {
return err
}
}
}
}
mmapChunkReplayStart := time.Now()
var (
mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
lastMmapRef chunks.ChunkDiskMapperRef
err error
mmapChunkReplayDuration time.Duration
)
if snapshotLoaded || h.wal != nil {
// If the snapshot was not loaded and there is no WAL, then m-map chunks will be discarded
// anyway, so we only load m-map chunks when they won't be discarded.
mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries)
if err != nil {
// TODO(codesome): clear out all m-map chunks here for refSeries.
h.logger.Error("Loading on-disk chunks failed", "err", err)
var cerr *chunks.CorruptionErr
if errors.As(err, &cerr) {
h.metrics.mmapChunkCorruptionTotal.Inc()
}
// Discard snapshot data since we need to replay the WAL for the missed m-map chunks data.
snapIdx, snapOffset = -1, 0
// If this fails, data will be recovered from the WAL.
// Hence we won't lose any data (provided the WAL is not corrupt).
mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err)
if err != nil {
return err
}
}
mmapChunkReplayDuration = time.Since(mmapChunkReplayStart)
h.logger.Info("On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String())
}
if h.wal == nil {
h.logger.Info("WAL not found")
return nil
}
h.logger.Info("Replaying WAL, this may take a while")
checkpointReplayStart := time.Now()
// Backfill the checkpoint first if it exists.
dir, startFrom, err := wlog.LastCheckpoint(h.wal.Dir())
if err != nil && !errors.Is(err, record.ErrNotFound) {
return fmt.Errorf("find last checkpoint: %w", err)
}
// Find the last segment.
_, endAt, e := wlog.Segments(h.wal.Dir())
if e != nil {
return fmt.Errorf("finding WAL segments: %w", e)
}
h.startWALReplayStatus(startFrom, endAt)
syms := labels.NewSymbolTable() // One table for the whole WAL.
multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{}
if err == nil && startFrom >= snapIdx {
sr, err := wlog.NewSegmentsReader(dir)
if err != nil {
return fmt.Errorf("open checkpoint: %w", err)
}
defer func() {
if err := sr.Close(); err != nil {
h.logger.Warn("Error while closing the wal segments reader", "err", err)
}
}()
// A corrupted checkpoint is a hard error for now and requires user
// intervention. There's likely little data that can be recovered anyway.
if err := h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks); err != nil {
return fmt.Errorf("backfill checkpoint: %w", err)
}
h.updateWALReplayStatusRead(startFrom)
startFrom++
h.logger.Info("WAL checkpoint loaded")
}
checkpointReplayDuration := time.Since(checkpointReplayStart)
walReplayStart := time.Now()
if snapIdx > startFrom {
startFrom = snapIdx
}
// Backfill segments from the most recent checkpoint onwards.
for i := startFrom; i <= endAt; i++ {
walSegmentStart := time.Now()
s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i))
if err != nil {
return fmt.Errorf("open WAL segment: %d: %w", i, err)
}
offset := 0
if i == snapIdx {
offset = snapOffset
}
sr, err := wlog.NewSegmentBufReaderWithOffset(offset, s)
if errors.Is(err, io.EOF) {
// File does not exist.
continue
}
if err != nil {
return fmt.Errorf("segment reader (offset=%d): %w", offset, err)
}
err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks)
if err := sr.Close(); err != nil {
h.logger.Warn("Error while closing the wal segments reader", "err", err)
}
if err != nil {
return err
}
h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt, "duration", time.Since(walSegmentStart))
h.updateWALReplayStatusRead(i)
}
walReplayDuration := time.Since(walReplayStart)
wblReplayStart := time.Now()
if h.wbl != nil {
// Replay WBL.
startFrom, endAt, e = wlog.Segments(h.wbl.Dir())
if e != nil {
return &errLoadWbl{fmt.Errorf("finding WBL segments: %w", e)}
}
h.startWALReplayStatus(startFrom, endAt)
for i := startFrom; i <= endAt; i++ {
s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i))
if err != nil {
return &errLoadWbl{fmt.Errorf("open WBL segment: %d: %w", i, err)}
}
sr := wlog.NewSegmentBufReader(s)
err = h.loadWBL(wlog.NewReader(sr), syms, multiRef, lastMmapRef)
if err := sr.Close(); err != nil {
h.logger.Warn("Error while closing the wbl segments reader", "err", err)
}
if err != nil {
return &errLoadWbl{err}
}
h.logger.Info("WBL segment loaded", "segment", i, "maxSegment", endAt)
h.updateWALReplayStatusRead(i)
}
}
wblReplayDuration := time.Since(wblReplayStart)
totalReplayDuration := time.Since(start)
h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds())
h.logger.Info(
"WAL replay completed",
"checkpoint_replay_duration", checkpointReplayDuration.String(),
"wal_replay_duration", walReplayDuration.String(),
"wbl_replay_duration", wblReplayDuration.String(),
"chunk_snapshot_load_duration", chunkSnapshotLoadDuration.String(),
"mmap_chunk_replay_duration", mmapChunkReplayDuration.String(),
"total_replay_duration", totalReplayDuration.String(),
)
return nil
}
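// A minimal sketch (not from the upstream file) of how Init derives the WAL
// replay range above: the last checkpoint gives the segment to start from and
// wlog.Segments gives the last segment to read. Both helpers appear in Init
// with exactly these signatures; walDir and replayCheckpoint are stand-ins.
//
//	dir, startFrom, err := wlog.LastCheckpoint(walDir)
//	if err != nil && !errors.Is(err, record.ErrNotFound) {
//		return err // a missing checkpoint is fine; other errors are not
//	}
//	if err == nil {
//		replayCheckpoint(dir) // hypothetical; Init feeds it through loadWAL
//		startFrom++           // continue with the segment after the checkpoint
//	}
//	_, endAt, err := wlog.Segments(walDir)
//	if err != nil {
//		return err
//	}
//	for i := startFrom; i <= endAt; i++ {
//		// open wlog.SegmentName(walDir, i) and feed it to loadWAL
//	}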
func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) {
mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
var lastRef, secondLastRef chunks.ChunkDiskMapperRef
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_wal.go | tsdb/head_wal.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"errors"
"fmt"
"maps"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/wlog"
)
// histogramRecord combines both RefHistogramSample and RefFloatHistogramSample
// to simplify the WAL replay.
type histogramRecord struct {
ref chunks.HeadSeriesRef
t int64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
type seriesRefSet struct {
refs map[chunks.HeadSeriesRef]struct{}
mtx sync.Mutex
}
func (s *seriesRefSet) merge(other map[chunks.HeadSeriesRef]struct{}) {
s.mtx.Lock()
defer s.mtx.Unlock()
maps.Copy(s.refs, other)
}
func (s *seriesRefSet) count() int {
s.mtx.Lock()
defer s.mtx.Unlock()
return len(s.refs)
}
func counterAddNonZero(v *prometheus.CounterVec, value float64, lvs ...string) {
if value > 0 {
v.WithLabelValues(lvs...).Add(value)
}
}
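// A minimal sketch (not from the upstream file) of why the guard exists:
// counterAddNonZero skips WithLabelValues for zero counts, so a clean replay
// does not create never-incremented label children on the CounterVec.
//
//	counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, 0, "samples") // no-op: no child created
//	counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, 3, "series")  // creates and increments the "series" child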
func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
// Track number of missing series records that were referenced by other records.
unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}}
// Track number of different records that referenced a series we don't know about
// for error reporting.
var unknownSampleRefs atomic.Uint64
var unknownExemplarRefs atomic.Uint64
var unknownHistogramRefs atomic.Uint64
var unknownMetadataRefs atomic.Uint64
var unknownTombstoneRefs atomic.Uint64
// Track number of series records that had overlapping m-map chunks.
var mmapOverlappingChunks atomic.Uint64
// Start workers that each process samples for a partition of the series ID space.
var (
wg sync.WaitGroup
concurrency = h.opts.WALReplayConcurrency
processors = make([]walSubsetProcessor, concurrency)
exemplarsInput chan record.RefExemplar
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
decoded = make(chan any, 10)
decodeErr, seriesCreationErr error
)
defer func() {
// For CorruptionErr, ensure that all workers are terminated before exiting.
_, ok := err.(*wlog.CorruptionErr)
if ok || seriesCreationErr != nil {
for i := range concurrency {
processors[i].closeAndDrain()
}
close(exemplarsInput)
wg.Wait()
}
}()
wg.Add(concurrency)
for i := range concurrency {
processors[i].setup()
go func(wp *walSubsetProcessor) {
missingSeries, unknownSamples, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks)
unknownSeriesRefs.merge(missingSeries)
unknownSampleRefs.Add(unknownSamples)
mmapOverlappingChunks.Add(overlapping)
unknownHistogramRefs.Add(unknownHistograms)
wg.Done()
}(&processors[i])
}
wg.Add(1)
exemplarsInput = make(chan record.RefExemplar, 300)
go func(input <-chan record.RefExemplar) {
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
var err error
defer wg.Done()
for e := range input {
ms := h.series.getByID(e.Ref)
if ms == nil {
unknownExemplarRefs.Inc()
missingSeries[e.Ref] = struct{}{}
continue
}
// At the moment the only possible error here is an out-of-order exemplar, which we shouldn't see when
// replaying the WAL, so just log the error if it is of that type.
err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) {
h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err)
}
}
unknownSeriesRefs.merge(missingSeries)
}(exemplarsInput)
go func() {
defer close(decoded)
var err error
dec := record.NewDecoder(syms, h.logger)
for r.Next() {
switch dec.Type(r.Record()) {
case record.Series:
series := h.wlReplaySeriesPool.Get()[:0]
series, err = dec.Series(r.Record(), series)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode series: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- series
case record.Samples:
samples := h.wlReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(r.Record(), samples)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- samples
case record.Tombstones:
tstones := h.wlReplaytStonesPool.Get()[:0]
tstones, err = dec.Tombstones(r.Record(), tstones)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode tombstones: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- tstones
case record.Exemplars:
exemplars := h.wlReplayExemplarsPool.Get()[:0]
exemplars, err = dec.Exemplars(r.Record(), exemplars)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode exemplars: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- exemplars
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
hists := h.wlReplayHistogramsPool.Get()[:0]
hists, err = dec.HistogramSamples(r.Record(), hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- hists
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
hists := h.wlReplayFloatHistogramsPool.Get()[:0]
hists, err = dec.FloatHistogramSamples(r.Record(), hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode float histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- hists
case record.Metadata:
meta := h.wlReplayMetadataPool.Get()[:0]
meta, err := dec.Metadata(r.Record(), meta)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode metadata: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- meta
default:
// Noop.
}
}
}()
// The records are always replayed from the oldest to the newest.
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
Outer:
for d := range decoded {
switch v := d.(type) {
case []record.RefSeries:
for _, walSeries := range v {
mSeries, created, err := h.getOrCreateWithOptionalID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels, false)
if err != nil {
seriesCreationErr = err
break Outer
}
if chunks.HeadSeriesRef(h.lastSeriesID.Load()) < walSeries.Ref {
h.lastSeriesID.Store(uint64(walSeries.Ref))
}
if !created {
multiRef[walSeries.Ref] = mSeries.ref
}
idx := uint64(mSeries.ref) % uint64(concurrency)
processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries}
}
h.wlReplaySeriesPool.Put(v)
case []record.RefSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
// This is a sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
for i := range concurrency {
if len(shards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplaySamplesPool.Put(v)
case []tombstones.Stone:
for _, s := range v {
for _, itv := range s.Intervals {
if itv.Maxt < h.minValidTime.Load() {
continue
}
if r, ok := multiRef[chunks.HeadSeriesRef(s.Ref)]; ok {
// This is a tombstone for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(chunks.HeadSeriesRef(s.Ref), itv.Maxt)
s.Ref = storage.SeriesRef(r)
}
if m := h.series.getByID(chunks.HeadSeriesRef(s.Ref)); m == nil {
unknownTombstoneRefs.Inc()
missingSeries[chunks.HeadSeriesRef(s.Ref)] = struct{}{}
continue
}
h.tombstones.AddInterval(s.Ref, itv)
}
}
h.wlReplaytStonesPool.Put(v)
case []record.RefExemplar:
for _, e := range v {
if e.T < h.minValidTime.Load() {
continue
}
if r, ok := multiRef[e.Ref]; ok {
// This is an exemplar for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(e.Ref, e.T)
e.Ref = r
}
exemplarsInput <- e
}
h.wlReplayExemplarsPool.Put(v)
case []record.RefHistogramSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
// This is a histogram sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayHistogramsPool.Put(v)
case []record.RefFloatHistogramSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
// This is a float histogram sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayFloatHistogramsPool.Put(v)
case []record.RefMetadata:
for _, m := range v {
if r, ok := multiRef[m.Ref]; ok {
m.Ref = r
}
s := h.series.getByID(m.Ref)
if s == nil {
unknownMetadataRefs.Inc()
missingSeries[m.Ref] = struct{}{}
continue
}
s.meta = &metadata.Metadata{
Type: record.ToMetricType(m.Type),
Unit: m.Unit,
Help: m.Help,
}
}
h.wlReplayMetadataPool.Put(v)
default:
panic(fmt.Errorf("unexpected decoded type: %T", d))
}
}
unknownSeriesRefs.merge(missingSeries)
if decodeErr != nil {
return decodeErr
}
if seriesCreationErr != nil {
// Drain the channel to unblock the goroutine.
for range decoded {
}
return seriesCreationErr
}
// Signal termination to each worker and wait for it to close its output channel.
for i := range concurrency {
processors[i].closeAndDrain()
}
close(exemplarsInput)
wg.Wait()
if err := r.Err(); err != nil {
return fmt.Errorf("read records: %w", err)
}
if unknownSampleRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load()+unknownTombstoneRefs.Load() > 0 {
h.logger.Warn(
"Unknown series references",
"series", unknownSeriesRefs.count(),
"samples", unknownSampleRefs.Load(),
"exemplars", unknownExemplarRefs.Load(),
"histograms", unknownHistogramRefs.Load(),
"metadata", unknownMetadataRefs.Load(),
"tombstones", unknownTombstoneRefs.Load(),
)
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownSeriesRefs.count()), "series")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownSampleRefs.Load()), "samples")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownExemplarRefs.Load()), "exemplars")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownMetadataRefs.Load()), "metadata")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownTombstoneRefs.Load()), "tombstones")
}
if count := mmapOverlappingChunks.Load(); count > 0 {
h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count)
}
return nil
}
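// A minimal sketch (not from the upstream file) of the fan-out loadWAL uses:
// records are sharded by series reference so a given series is always handled
// by the same worker, making per-series appends single-writer without locks.
// The workers channel slice is a stand-in for processors[i].input.
//
//	shards := make([][]record.RefSample, concurrency)
//	for _, s := range batch {
//		i := uint64(s.Ref) % uint64(concurrency)
//		shards[i] = append(shards[i], s)
//	}
//	for i := range shards {
//		if len(shards[i]) > 0 {
//			workers[i] <- shards[i] // each worker owns its subset of series
//		}
//	}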
// resetSeriesWithMMappedChunks is only used during the WAL replay.
func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) {
if mSeries.ref != walSeriesRef {
// Checking if the new m-mapped chunks overlap with the already existing ones.
if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 {
if overlapsClosedInterval(
mSeries.mmappedChunks[0].minTime,
mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
mmc[0].minTime,
mmc[len(mmc)-1].maxTime,
) {
h.logger.Debug(
"M-mapped chunks overlap on a duplicate series record",
"series", mSeries.labels().String(),
"oldref", mSeries.ref,
"oldmint", mSeries.mmappedChunks[0].minTime,
"oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
"newref", walSeriesRef,
"newmint", mmc[0].minTime,
"newmaxt", mmc[len(mmc)-1].maxTime,
)
overlapped = true
}
}
}
h.metrics.chunksCreated.Add(float64(len(mmc) + len(oooMmc)))
h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks)))
if mSeries.ooo != nil {
h.metrics.chunksRemoved.Add(float64(len(mSeries.ooo.oooMmappedChunks)))
h.metrics.chunks.Sub(float64(len(mSeries.ooo.oooMmappedChunks)))
}
mSeries.mmappedChunks = mmc
if len(oooMmc) == 0 {
mSeries.ooo = nil
} else {
if mSeries.ooo == nil {
mSeries.ooo = &memSeriesOOOFields{}
}
*mSeries.ooo = memSeriesOOOFields{oooMmappedChunks: oooMmc}
}
// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
if len(mmc) == 0 {
mSeries.mmMaxTime = math.MinInt64
} else {
mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
}
if len(oooMmc) != 0 {
// Mint and maxt can be in any chunk; the chunks are not sorted.
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
for _, ch := range oooMmc {
if ch.minTime < mint {
mint = ch.minTime
}
if ch.maxTime > maxt {
maxt = ch.maxTime
}
}
h.updateMinOOOMaxOOOTime(mint, maxt)
}
// Any samples replayed till now would already be compacted. Resetting the head chunk.
mSeries.nextAt = 0
mSeries.headChunks = nil
mSeries.app = nil
return overlapped
}
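// A minimal sketch (not from the upstream file) of the closed-interval overlap
// test behind the check above. The real overlapsClosedInterval helper lives
// elsewhere in this package and is assumed to match this standard formulation:
// two closed ranges overlap iff each one starts no later than the other ends.
//
//	func overlaps(mint1, maxt1, mint2, maxt2 int64) bool {
//		return mint1 <= maxt2 && mint2 <= maxt1
//	}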
type walSubsetProcessor struct {
input chan walSubsetProcessorInputItem
output chan []record.RefSample
histogramsOutput chan []histogramRecord
}
type walSubsetProcessorInputItem struct {
samples []record.RefSample
histogramSamples []histogramRecord
existingSeries *memSeries
walSeriesRef chunks.HeadSeriesRef
}
func (wp *walSubsetProcessor) setup() {
wp.input = make(chan walSubsetProcessorInputItem, 300)
wp.output = make(chan []record.RefSample, 300)
wp.histogramsOutput = make(chan []histogramRecord, 300)
}
func (wp *walSubsetProcessor) closeAndDrain() {
close(wp.input)
for range wp.output {
}
for range wp.histogramsOutput {
}
}
// If there is a buffer in the output chan, return it for reuse; otherwise return nil.
func (wp *walSubsetProcessor) reuseBuf() []record.RefSample {
select {
case buf := <-wp.output:
return buf[:0]
default:
}
return nil
}
// If there is a buffer in the output chan, return it for reuse; otherwise return nil.
func (wp *walSubsetProcessor) reuseHistogramBuf() []histogramRecord {
select {
case buf := <-wp.histogramsOutput:
return buf[:0]
default:
}
return nil
}
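// A minimal sketch (not from the upstream file) of the non-blocking recycling
// idiom reuseBuf and reuseHistogramBuf implement: take a buffer back from the
// output channel if a worker has already returned one, but never stall the
// replay loop waiting for it.
//
//	select {
//	case buf := <-recycled: // recycled stands in for wp.output
//		return buf[:0]      // keep capacity, drop contents
//	default:
//		return nil // caller lets append allocate instead
//	}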
// processWALSamples adds the samples it receives to the head and passes
// the buffer received to an output channel for reuse.
// Samples before the minValidTime timestamp are discarded.
func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (map[chunks.HeadSeriesRef]struct{}, uint64, uint64, uint64) {
defer close(wp.output)
defer close(wp.histogramsOutput)
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
var unknownSampleRefs, unknownHistogramRefs, mmapOverlappingChunks uint64
minValidTime := h.minValidTime.Load()
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
appendChunkOpts := chunkOpts{
chunkDiskMapper: h.chunkDiskMapper,
chunkRange: h.chunkRange.Load(),
samplesPerChunk: h.opts.SamplesPerChunk,
}
for in := range wp.input {
if in.existingSeries != nil {
mmc := mmappedChunks[in.walSeriesRef]
oooMmc := oooMmappedChunks[in.walSeriesRef]
if h.resetSeriesWithMMappedChunks(in.existingSeries, mmc, oooMmc, in.walSeriesRef) {
mmapOverlappingChunks++
}
continue
}
for _, s := range in.samples {
ms := h.series.getByID(s.Ref)
if ms == nil {
unknownSampleRefs++
missingSeries[s.Ref] = struct{}{}
continue
}
if s.T <= ms.mmMaxTime {
continue
}
if !value.IsStaleNaN(ms.lastValue) && value.IsStaleNaN(s.V) {
h.numStaleSeries.Inc()
}
if value.IsStaleNaN(ms.lastValue) && !value.IsStaleNaN(s.V) {
h.numStaleSeries.Dec()
}
if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
_ = ms.mmapChunks(h.chunkDiskMapper)
}
if s.T > maxt {
maxt = s.T
}
if s.T < mint {
mint = s.T
}
}
select {
case wp.output <- in.samples:
default:
}
for _, s := range in.histogramSamples {
if s.t < minValidTime {
continue
}
ms := h.series.getByID(s.ref)
if ms == nil {
unknownHistogramRefs++
missingSeries[s.ref] = struct{}{}
continue
}
if s.t <= ms.mmMaxTime {
continue
}
var chunkCreated, newlyStale, staleToNonStale bool
if s.h != nil {
newlyStale = value.IsStaleNaN(s.h.Sum)
if ms.lastHistogramValue != nil {
newlyStale = newlyStale && !value.IsStaleNaN(ms.lastHistogramValue.Sum)
staleToNonStale = value.IsStaleNaN(ms.lastHistogramValue.Sum) && !value.IsStaleNaN(s.h.Sum)
}
_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts)
} else {
newlyStale = value.IsStaleNaN(s.fh.Sum)
if ms.lastFloatHistogramValue != nil {
newlyStale = newlyStale && !value.IsStaleNaN(ms.lastFloatHistogramValue.Sum)
staleToNonStale = value.IsStaleNaN(ms.lastFloatHistogramValue.Sum) && !value.IsStaleNaN(s.fh.Sum)
}
_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts)
}
if newlyStale {
h.numStaleSeries.Inc()
}
if staleToNonStale {
h.numStaleSeries.Dec()
}
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
}
if s.t > maxt {
maxt = s.t
}
if s.t < mint {
mint = s.t
}
}
select {
case wp.histogramsOutput <- in.histogramSamples:
default:
}
}
h.updateMinMaxTime(mint, maxt)
return missingSeries, unknownSampleRefs, unknownHistogramRefs, mmapOverlappingChunks
}
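// A minimal sketch (not from the upstream file) of the staleness bookkeeping
// in processWALSamples: numStaleSeries must only move on transitions, not on
// every stale sample, so both directions compare against the previous value.
// value.IsStaleNaN is the same helper used above; staleSeries stands in for
// h.numStaleSeries.
//
//	wasStale := value.IsStaleNaN(lastValue)
//	isStale := value.IsStaleNaN(newValue)
//	switch {
//	case isStale && !wasStale:
//		staleSeries.Inc() // series just went stale
//	case wasStale && !isStale:
//		staleSeries.Dec() // series produced a real sample again
//	}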
func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) {
// Track number of missing series records that were referenced by other records.
unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}}
// Track number of samples, histogram samples, and m-map markers that referenced a series we don't know about
// for error reporting.
var unknownSampleRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64
lastSeq, lastOff := lastMmapRef.Unpack()
// Start workers that each process samples for a partition of the series ID space.
var (
wg sync.WaitGroup
concurrency = h.opts.WALReplayConcurrency
processors = make([]wblSubsetProcessor, concurrency)
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
decodedCh = make(chan any, 10)
decodeErr error
)
defer func() {
// For CorruptionErr, ensure that all workers are terminated before exiting.
// We also wrap it to identify OOO WBL corruption.
_, ok := err.(*wlog.CorruptionErr)
if ok {
err = &errLoadWbl{err: err}
for i := range concurrency {
processors[i].closeAndDrain()
}
wg.Wait()
}
}()
wg.Add(concurrency)
for i := range concurrency {
processors[i].setup()
go func(wp *wblSubsetProcessor) {
missingSeries, unknownSamples, unknownHistograms := wp.processWBLSamples(h)
unknownSeriesRefs.merge(missingSeries)
unknownSampleRefs.Add(unknownSamples)
unknownHistogramRefs.Add(unknownHistograms)
wg.Done()
}(&processors[i])
}
go func() {
defer close(decodedCh)
dec := record.NewDecoder(syms, h.logger)
for r.Next() {
var err error
rec := r.Record()
switch dec.Type(rec) {
case record.Samples:
samples := h.wlReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(rec, samples)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- samples
case record.MmapMarkers:
markers := h.wlReplayMmapMarkersPool.Get()[:0]
markers, err = dec.MmapMarkers(rec, markers)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode mmap markers: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- markers
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
hists := h.wlReplayHistogramsPool.Get()[:0]
hists, err = dec.HistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- hists
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
hists := h.wlReplayFloatHistogramsPool.Get()[:0]
hists, err = dec.FloatHistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode float histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- hists
default:
// Noop.
}
}
}()
// The records are always replayed from the oldest to the newest.
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
for d := range decodedCh {
switch v := d.(type) {
case []record.RefSample:
samples := v
// We split up the samples into parts of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
for i := range concurrency {
if len(shards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplaySamplesPool.Put(v)
case []record.RefMmapMarker:
markers := v
for _, rm := range markers {
seq, off := rm.MmapRef.Unpack()
if seq > lastSeq || (seq == lastSeq && off > lastOff) {
// This m-map chunk from markers was not present during
// the load of mmapped chunks that happened in the head
// initialization.
continue
}
if r, ok := multiRef[rm.Ref]; ok {
rm.Ref = r
}
ms := h.series.getByID(rm.Ref)
if ms == nil {
mmapMarkerUnknownRefs.Inc()
missingSeries[rm.Ref] = struct{}{}
continue
}
idx := uint64(ms.ref) % uint64(concurrency)
processors[idx].input <- wblSubsetProcessorInputItem{mmappedSeries: ms}
}
case []record.RefHistogramSample:
samples := v
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayHistogramsPool.Put(v)
case []record.RefFloatHistogramSample:
samples := v
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayFloatHistogramsPool.Put(v)
default:
panic(fmt.Errorf("unexpected decodedCh type: %T", d))
}
}
unknownSeriesRefs.merge(missingSeries)
if decodeErr != nil {
return decodeErr
}
// Signal termination to each worker and wait for it to close its output channel.
for i := range concurrency {
processors[i].closeAndDrain()
}
wg.Wait()
if err := r.Err(); err != nil {
return fmt.Errorf("read records: %w", err)
}
if unknownSampleRefs.Load()+unknownHistogramRefs.Load()+mmapMarkerUnknownRefs.Load() > 0 {
h.logger.Warn(
"Unknown series references for ooo WAL replay",
"series", unknownSeriesRefs.count(),
"samples", unknownSampleRefs.Load(),
"histograms", unknownHistogramRefs.Load(),
"mmap_markers", mmapMarkerUnknownRefs.Load(),
)
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownSeriesRefs.count()), "series")
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownSampleRefs.Load()), "samples")
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms")
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(mmapMarkerUnknownRefs.Load()), "mmap_markers")
}
return nil
}
type errLoadWbl struct {
err error
}
func (e errLoadWbl) Error() string {
return e.err.Error()
}
func (e errLoadWbl) Cause() error {
return e.err
}
func (e errLoadWbl) Unwrap() error {
return e.err
}
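// A minimal sketch (not from the upstream file) of why errLoadWbl implements
// Unwrap: callers can distinguish WBL replay failures with errors.As while
// still reaching the underlying cause through the standard errors chain.
//
//	err := error(&errLoadWbl{err: &wlog.CorruptionErr{}})
//	var lerr *errLoadWbl
//	if errors.As(err, &lerr) {
//		// WBL-specific handling
//	}
//	var cerr *wlog.CorruptionErr
//	if errors.As(err, &cerr) {
//		// reached through Unwrap: handle the corrupt segment
//	}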
type wblSubsetProcessor struct {
input chan wblSubsetProcessorInputItem
output chan []record.RefSample
histogramsOutput chan []histogramRecord
}
type wblSubsetProcessorInputItem struct {
mmappedSeries *memSeries
samples []record.RefSample
histogramSamples []histogramRecord
}
func (wp *wblSubsetProcessor) setup() {
wp.output = make(chan []record.RefSample, 300)
wp.histogramsOutput = make(chan []histogramRecord, 300)
wp.input = make(chan wblSubsetProcessorInputItem, 300)
}
func (wp *wblSubsetProcessor) closeAndDrain() {
close(wp.input)
for range wp.output {
}
for range wp.histogramsOutput {
}
}
// If there is a buffer in the output chan, return it for reuse; otherwise return nil.
func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample {
select {
case buf := <-wp.output:
return buf[:0]
default:
}
return nil
}
// If there is a buffer in the output chan, return it for reuse; otherwise return nil.
func (wp *wblSubsetProcessor) reuseHistogramBuf() []histogramRecord {
select {
case buf := <-wp.histogramsOutput:
return buf[:0]
default:
}
return nil
}
// processWBLSamples adds the samples it receives to the head and passes
// the buffer received to an output channel for reuse.
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/blockwriter_test.go | tsdb/blockwriter_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"math"
"path/filepath"
"testing"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
)
func TestBlockWriter(t *testing.T) {
ctx := context.Background()
outputDir := t.TempDir()
w, err := NewBlockWriter(promslog.NewNopLogger(), outputDir, DefaultBlockDuration)
require.NoError(t, err)
// Add some series.
app := w.Appender(ctx)
ts1, v1 := int64(44), float64(7)
_, err = app.Append(0, labels.FromStrings("a", "b"), ts1, v1)
require.NoError(t, err)
ts2, v2 := int64(55), float64(12)
_, err = app.Append(0, labels.FromStrings("c", "d"), ts2, v2)
require.NoError(t, err)
require.NoError(t, app.Commit())
id, err := w.Flush(ctx)
require.NoError(t, err)
// Confirm the block has the correct data.
blockpath := filepath.Join(outputDir, id.String())
b, err := OpenBlock(nil, blockpath, nil, nil)
require.NoError(t, err)
defer func() { require.NoError(t, b.Close()) }()
q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
sample1 := []chunks.Sample{sample{t: ts1, f: v1}}
sample2 := []chunks.Sample{sample{t: ts2, f: v2}}
expectedSeries := map[string][]chunks.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2}
require.Equal(t, expectedSeries, series)
require.NoError(t, w.Close())
}
func TestBlockWriter_AppenderV2(t *testing.T) {
ctx := context.Background()
outputDir := t.TempDir()
w, err := NewBlockWriter(promslog.NewNopLogger(), outputDir, DefaultBlockDuration)
require.NoError(t, err)
// Add some series.
app := w.AppenderV2(ctx)
ts1, v1 := int64(44), float64(7)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, ts1, v1, nil, nil, storage.AOptions{})
require.NoError(t, err)
ts2, v2 := int64(55), float64(12)
_, err = app.Append(0, labels.FromStrings("c", "d"), 0, ts2, v2, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
id, err := w.Flush(ctx)
require.NoError(t, err)
// Confirm the block has the correct data.
blockpath := filepath.Join(outputDir, id.String())
b, err := OpenBlock(nil, blockpath, nil, nil)
require.NoError(t, err)
defer func() { require.NoError(t, b.Close()) }()
q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
sample1 := []chunks.Sample{sample{t: ts1, f: v1}}
sample2 := []chunks.Sample{sample{t: ts2, f: v2}}
expectedSeries := map[string][]chunks.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2}
require.Equal(t, expectedSeries, series)
require.NoError(t, w.Close())
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/ooo_head_read_test.go | tsdb/ooo_head_read_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"fmt"
"math"
"slices"
"sort"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/util/compression"
)
type chunkInterval struct {
// Because we permute the order of chunks, we cannot determine at test declaration time which chunkRefs we expect in the output.
// This ID matches expected output chunks against test input chunks; the test runner will assert the chunkRef for the matching chunk.
ID int
mint int64
maxt int64
}
type expChunk struct {
c chunkInterval
m []chunkInterval
}
// permutateChunkIntervals returns all possible orders of the given chunkIntervals.
func permutateChunkIntervals(in []chunkInterval, out [][]chunkInterval, left, right int) [][]chunkInterval {
if left == right {
inCopy := make([]chunkInterval, len(in))
copy(inCopy, in)
return append(out, inCopy)
}
for i := left; i <= right; i++ {
in[left], in[i] = in[i], in[left]
out = permutateChunkIntervals(in, out, left+1, right)
in[left], in[i] = in[i], in[left]
}
return out
}
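// A minimal sketch (not from the upstream file) of how the tests below drive
// permutateChunkIntervals: seeded with left=0 and right=len(in)-1 it yields
// all n! orderings, so each case is exercised against every possible chunk
// load order.
//
//	in := []chunkInterval{
//		{ID: 0, mint: 100, maxt: 200},
//		{ID: 1, mint: 150, maxt: 250},
//		{ID: 2, mint: 300, maxt: 400},
//	}
//	perms := permutateChunkIntervals(in, nil, 0, len(in)-1)
//	// len(perms) == 6: all 3! orderings of the three chunks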
// TestOOOHeadIndexReader_Series tests that the Series method works as expected.
// However, it does so by creating chunks and memory-mapping them directly, unlike
// other head tests where samples are appended and the head does the memory-mapping.
// We do this because the ingestion path and the appender for out of order
// samples are not ready yet.
func TestOOOHeadIndexReader_Series(t *testing.T) {
tests := []struct {
name string
queryMinT int64
queryMaxT int64
inputChunkIntervals []chunkInterval
expChunks []expChunk
}{
{
name: "Empty result and no error when head is empty",
queryMinT: 0,
queryMaxT: 100,
expChunks: nil,
},
{
name: "If query interval is bigger than the existing chunks nothing is returned",
queryMinT: 500,
queryMaxT: 700,
inputChunkIntervals: []chunkInterval{
{0, 100, 400},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [---------------------------------------]
// Chunk 0 [-----------------------------------------------------------]
expChunks: nil,
},
{
name: "If query interval is smaller than the existing chunks nothing is returned",
queryMinT: 100,
queryMaxT: 400,
inputChunkIntervals: []chunkInterval{
{0, 500, 700},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [-----------------------------------------------------------]
// Chunk 0: [---------------------------------------]
expChunks: nil,
},
{
name: "If query interval exceeds the existing chunk, it is returned",
queryMinT: 100,
queryMaxT: 400,
inputChunkIntervals: []chunkInterval{
{0, 150, 350},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [-----------------------------------------------------------]
// Chunk 0: [---------------------------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 150, 350}},
},
},
{
name: "If chunk exceeds the query interval, it is returned",
queryMinT: 150,
queryMaxT: 350,
inputChunkIntervals: []chunkInterval{
{0, 100, 400},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval: [---------------------------------------]
// Chunk 0: [-----------------------------------------------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 400}},
},
},
{
name: "Pairwise overlaps should return the references of the first of each pair",
queryMinT: 0,
queryMaxT: 700,
inputChunkIntervals: []chunkInterval{
{0, 100, 200},
{1, 500, 600},
{2, 150, 250},
{3, 550, 650},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [---------------------------------------------------------------------------------------------------------------------------------]
// Chunk 0: [-------------------]
// Chunk 1: [-------------------]
// Chunk 2: [-------------------]
// Chunk 3: [-------------------]
// Output Graphically [-----------------------------] [-----------------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 250}, m: []chunkInterval{{0, 100, 200}, {2, 150, 250}}},
{c: chunkInterval{1, 500, 650}, m: []chunkInterval{{1, 500, 600}, {3, 550, 650}}},
},
},
{
name: "If all chunks overlap, single big chunk is returned",
queryMinT: 0,
queryMaxT: 700,
inputChunkIntervals: []chunkInterval{
{0, 100, 200},
{1, 200, 300},
{2, 300, 400},
{3, 400, 500},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [---------------------------------------------------------------------------------------------------------------------------------]
// Chunk 0: [-------------------]
// Chunk 1: [-------------------]
// Chunk 2: [-------------------]
// Chunk 3: [------------------]
// Output Graphically [------------------------------------------------------------------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 500}, m: []chunkInterval{{0, 100, 200}, {1, 200, 300}, {2, 300, 400}, {3, 400, 500}}},
},
},
{
name: "If no chunks overlap, all chunks are returned",
queryMinT: 0,
queryMaxT: 700,
inputChunkIntervals: []chunkInterval{
{0, 100, 199},
{1, 200, 299},
{2, 300, 399},
{3, 400, 499},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [---------------------------------------------------------------------------------------------------------------------------------]
// Chunk 0: [------------------]
// Chunk 1: [------------------]
// Chunk 2: [------------------]
// Chunk 3: [------------------]
// Output Graphically [------------------][------------------][------------------][------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 199}},
{c: chunkInterval{1, 200, 299}},
{c: chunkInterval{2, 300, 399}},
{c: chunkInterval{3, 400, 499}},
},
},
{
name: "Triplet with pairwise overlaps, query range covers all, and distractor extra chunk",
queryMinT: 0,
queryMaxT: 400,
inputChunkIntervals: []chunkInterval{
{0, 100, 200},
{1, 150, 300},
{2, 250, 350},
{3, 450, 550},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [--------------------------------------------------------------------]
// Chunk 0: [------------------]
// Chunk 1: [-----------------------------]
// Chunk 2: [------------------]
// Chunk 3: [------------------]
// Output Graphically [-----------------------------------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 350}, m: []chunkInterval{{0, 100, 200}, {1, 150, 300}, {2, 250, 350}}},
},
},
{
name: "Query interval partially overlaps some chunks",
queryMinT: 100,
queryMaxT: 400,
inputChunkIntervals: []chunkInterval{
{0, 250, 500},
{1, 0, 200},
{2, 150, 300},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
// Query Interval [------------------------------------------------------------]
// Chunk 0: [-------------------------------------------------]
// Chunk 1: [-----------------------------]
// Chunk 2: [------------------------------]
// Output Graphically [-----------------------------------------------------------------------------------------]
expChunks: []expChunk{
{c: chunkInterval{1, 0, 500}, m: []chunkInterval{{1, 0, 200}, {2, 150, 300}, {0, 250, 500}}},
},
},
{
name: "A full overlap pair and disjointed triplet",
queryMinT: 0,
queryMaxT: 900,
inputChunkIntervals: []chunkInterval{
{0, 100, 300},
{1, 770, 850},
{2, 150, 250},
{3, 650, 750},
{4, 600, 800},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700 750 800 850
// Query Interval [---------------------------------------------------------------------------------------------------------------------------------------------------------------]
// Chunk 0: [---------------------------------------]
// Chunk 1: [--------------]
// Chunk 2: [-------------------]
// Chunk 3: [-------------------]
// Chunk 4: [---------------------------------------]
// Output Graphically [---------------------------------------] [------------------------------------------------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 300}, m: []chunkInterval{{0, 100, 300}, {2, 150, 250}}},
{c: chunkInterval{4, 600, 850}, m: []chunkInterval{{4, 600, 800}, {3, 650, 750}, {1, 770, 850}}},
},
},
{
name: "Query range covers 3 disjoint chunks",
queryMinT: 0,
queryMaxT: 650,
inputChunkIntervals: []chunkInterval{
{0, 100, 150},
{1, 300, 350},
{2, 200, 250},
},
// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700 750 800 850
// Query Interval [----------------------------------------------------------------------------------------------------------------------]
// Chunk 0: [-------]
// Chunk 1: [----------]
// Chunk 2: [--------]
// Output Graphically [-------] [--------] [----------]
expChunks: []expChunk{
{c: chunkInterval{0, 100, 150}},
{c: chunkInterval{2, 200, 250}},
{c: chunkInterval{1, 300, 350}},
},
},
}
s1Lset := labels.FromStrings("foo", "bar")
s1ID := uint64(1)
for _, tc := range tests {
var permutations [][]chunkInterval
if len(tc.inputChunkIntervals) == 0 {
			// Handle the special case of no input chunks with a single nil permutation.
permutations = [][]chunkInterval{
nil,
}
} else {
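			// Exercise every ordering of the input chunks so the results do not depend on insertion order.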
permutations = permutateChunkIntervals(tc.inputChunkIntervals, nil, 0, len(tc.inputChunkIntervals)-1)
}
for perm, intervals := range permutations {
for _, headChunk := range []bool{false, true} {
t.Run(fmt.Sprintf("name=%s, permutation=%d, headChunk=%t", tc.name, perm, headChunk), func(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, true)
defer func() {
require.NoError(t, h.Close())
}()
require.NoError(t, h.Init(0))
s1, _, _ := h.getOrCreate(s1ID, s1Lset, false)
s1.ooo = &memSeriesOOOFields{}
					// Define our expected chunks by looking at the expected chunk intervals,
					// setting Ref to whatever Ref the chunk that we refer to by ID actually has.
findID := func(id int) chunks.ChunkRef {
for ref, c := range intervals {
if c.ID == id {
return chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref)))
}
}
return 0
}
var expChunks []chunks.Meta
for _, e := range tc.expChunks {
var chunk chunkenc.Chunk
if len(e.m) > 0 {
mm := &multiMeta{}
for _, x := range e.m {
meta := chunks.Meta{
MinTime: x.mint,
MaxTime: x.maxt,
Ref: findID(x.ID),
}
mm.metas = append(mm.metas, meta)
}
chunk = mm
}
meta := chunks.Meta{
Chunk: chunk,
MinTime: e.c.mint,
MaxTime: e.c.maxt,
Ref: findID(e.c.ID),
}
expChunks = append(expChunks, meta)
}
if headChunk && len(intervals) > 0 {
// Put the last interval in the head chunk
s1.ooo.oooHeadChunk = &oooHeadChunk{
chunk: NewOOOChunk(),
minTime: intervals[len(intervals)-1].mint,
maxTime: intervals[len(intervals)-1].maxt,
}
intervals = intervals[:len(intervals)-1]
}
for _, ic := range intervals {
s1.ooo.oooMmappedChunks = append(s1.ooo.oooMmappedChunks, &mmappedChunk{
minTime: ic.mint,
maxTime: ic.maxt,
})
}
ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
err := ir.Series(storage.SeriesRef(s1ID), &b, &chks)
require.NoError(t, err)
require.Equal(t, s1Lset, b.Labels())
require.Equal(t, expChunks, chks)
err = ir.Series(storage.SeriesRef(s1ID+1), &b, &chks)
require.Equal(t, storage.ErrNotFound, err)
})
}
}
}
}
func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
testOOOHeadChunkReader_LabelValues(t, scenario)
})
}
}
//nolint:revive // unexported-return
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenario) {
chunkRange := int64(2000)
head, _ := newTestHead(t, chunkRange, compression.None, true)
t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
app := head.Appender(context.Background())
// Add in-order samples
_, _, err := scenario.appendFunc(app, labels.FromStrings("foo", "bar1"), 100, int64(1))
require.NoError(t, err)
_, _, err = scenario.appendFunc(app, labels.FromStrings("foo", "bar2"), 100, int64(2))
require.NoError(t, err)
// Add ooo samples for those series
_, _, err = scenario.appendFunc(app, labels.FromStrings("foo", "bar1"), 90, int64(1))
require.NoError(t, err)
_, _, err = scenario.appendFunc(app, labels.FromStrings("foo", "bar2"), 90, int64(2))
require.NoError(t, err)
require.NoError(t, app.Commit())
cases := []struct {
name string
queryMinT int64
queryMaxT int64
expValues1 []string
expValues2 []string
expValues3 []string
expValues4 []string
}{
{
name: "LabelValues calls when ooo head has max query range",
queryMinT: math.MinInt64,
queryMaxT: math.MaxInt64,
expValues1: []string{"bar1"},
expValues2: nil,
expValues3: []string{"bar1", "bar2"},
expValues4: []string{"bar1", "bar2"},
},
{
name: "LabelValues calls with ooo head query range not overlapping in-order data",
queryMinT: 90,
queryMaxT: 90,
expValues1: []string{"bar1"},
expValues2: nil,
expValues3: []string{"bar1", "bar2"},
expValues4: []string{"bar1", "bar2"},
},
{
name: "LabelValues calls with ooo head query range not overlapping out-of-order data",
queryMinT: 100,
queryMaxT: 100,
expValues1: []string{"bar1"},
expValues2: nil,
expValues3: []string{"bar1", "bar2"},
expValues4: []string{"bar1", "bar2"},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
// We first want to test using a head index reader that covers the biggest query interval
oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0)
matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")}
values, err := oh.LabelValues(ctx, "foo", nil, matchers...)
sort.Strings(values)
require.NoError(t, err)
require.Equal(t, tc.expValues1, values)
matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^bar.")}
values, err = oh.LabelValues(ctx, "foo", nil, matchers...)
sort.Strings(values)
require.NoError(t, err)
require.Equal(t, tc.expValues2, values)
matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.")}
values, err = oh.LabelValues(ctx, "foo", nil, matchers...)
sort.Strings(values)
require.NoError(t, err)
require.Equal(t, tc.expValues3, values)
values, err = oh.LabelValues(ctx, "foo", nil)
sort.Strings(values)
require.NoError(t, err)
require.Equal(t, tc.expValues4, values)
})
}
}
// TestOOOHeadChunkReader_Chunk tests that the Chunk method works as expected.
// It does so by appending out-of-order samples to the DB and then initializing
// a HeadAndOOOChunkReader to read chunks from it.
func TestOOOHeadChunkReader_Chunk(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
testOOOHeadChunkReader_Chunk(t, scenario)
})
}
}
//nolint:revive // unexported-return
func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultOptions()
opts.OutOfOrderCapMax = 5
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
s1 := labels.FromStrings("l", "v1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
	t.Run("Getting a non-existing chunk fails with not found error", func(t *testing.T) {
db := newTestDB(t, withOpts(opts))
cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0)
defer cr.Close()
c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
})
require.Nil(t, iterable)
require.EqualError(t, err, "not found")
require.Nil(t, c)
})
tests := []struct {
name string
queryMinT int64
queryMaxT int64
firstInOrderSampleAt int64
inputSamples []testValue
expSingleChunks bool
expChunkError bool
expChunksSamples []chunks.SampleSlice
}{
{
name: "Getting the head when there are no overlapping chunks returns just the samples in the head",
queryMinT: minutes(0),
queryMaxT: minutes(100),
firstInOrderSampleAt: minutes(120),
inputSamples: []testValue{
{Ts: minutes(30), V: 0},
{Ts: minutes(40), V: 0},
},
expChunkError: false,
expSingleChunks: true,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------------------------------------------------------------------------------]
// Chunk 0: Current Head [--------] (With 2 samples)
// Output Graphically [--------] (With 2 samples)
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(30), 0),
scenario.sampleFunc(minutes(40), 0),
},
},
},
{
name: "Getting the head chunk when there are overlapping chunks returns all combined",
queryMinT: minutes(0),
queryMaxT: minutes(100),
firstInOrderSampleAt: minutes(120),
			inputSamples: []testValue{
				// Chunk 0
				{Ts: minutes(41), V: 0},
				{Ts: minutes(42), V: 0},
				{Ts: minutes(43), V: 0},
				{Ts: minutes(44), V: 0},
				{Ts: minutes(45), V: 0},
				// Chunk 1: Current Head
				{Ts: minutes(30), V: 1},
				{Ts: minutes(50), V: 1},
			},
expChunkError: false,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------------------------------------------------------------------------------]
// Chunk 0 [---] (With 5 samples)
// Chunk 1: Current Head [-----------------] (With 2 samples)
// Output Graphically [-----------------] (With 7 samples)
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(30), 1),
scenario.sampleFunc(minutes(41), 0),
scenario.sampleFunc(minutes(42), 0),
scenario.sampleFunc(minutes(43), 0),
scenario.sampleFunc(minutes(44), 0),
scenario.sampleFunc(minutes(45), 0),
scenario.sampleFunc(minutes(50), 1),
},
},
},
{
name: "Two windows of overlapping chunks get properly converged",
queryMinT: minutes(0),
queryMaxT: minutes(100),
firstInOrderSampleAt: minutes(120),
inputSamples: []testValue{
// Chunk 0
{Ts: minutes(10), V: 0},
{Ts: minutes(12), V: 0},
{Ts: minutes(14), V: 0},
{Ts: minutes(16), V: 0},
{Ts: minutes(20), V: 0},
// Chunk 1
{Ts: minutes(20), V: 1},
{Ts: minutes(22), V: 1},
{Ts: minutes(24), V: 1},
{Ts: minutes(26), V: 1},
{Ts: minutes(29), V: 1},
				// Chunk 2
{Ts: minutes(30), V: 2},
{Ts: minutes(32), V: 2},
{Ts: minutes(34), V: 2},
{Ts: minutes(36), V: 2},
{Ts: minutes(40), V: 2},
// Head
{Ts: minutes(40), V: 3},
{Ts: minutes(50), V: 3},
},
expChunkError: false,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------------------------------------------------------------------------------]
// Chunk 0 [--------]
// Chunk 1 [-------]
// Chunk 2 [--------]
// Chunk 3: Current Head [--------]
// Output Graphically [----------------][-----------------]
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(10), 0),
scenario.sampleFunc(minutes(12), 0),
scenario.sampleFunc(minutes(14), 0),
scenario.sampleFunc(minutes(16), 0),
scenario.sampleFunc(minutes(20), 1),
scenario.sampleFunc(minutes(22), 1),
scenario.sampleFunc(minutes(24), 1),
scenario.sampleFunc(minutes(26), 1),
scenario.sampleFunc(minutes(29), 1),
},
{
scenario.sampleFunc(minutes(30), 2),
scenario.sampleFunc(minutes(32), 2),
scenario.sampleFunc(minutes(34), 2),
scenario.sampleFunc(minutes(36), 2),
scenario.sampleFunc(minutes(40), 3),
scenario.sampleFunc(minutes(50), 3),
},
},
},
{
name: "Two windows of overlapping chunks in descending order get properly converged",
queryMinT: minutes(0),
queryMaxT: minutes(100),
firstInOrderSampleAt: minutes(120),
inputSamples: []testValue{
// Chunk 0
{Ts: minutes(40), V: 0},
{Ts: minutes(42), V: 0},
{Ts: minutes(44), V: 0},
{Ts: minutes(46), V: 0},
{Ts: minutes(50), V: 0},
// Chunk 1
{Ts: minutes(30), V: 1},
{Ts: minutes(32), V: 1},
{Ts: minutes(34), V: 1},
{Ts: minutes(36), V: 1},
{Ts: minutes(40), V: 1},
				// Chunk 2
{Ts: minutes(20), V: 2},
{Ts: minutes(22), V: 2},
{Ts: minutes(24), V: 2},
{Ts: minutes(26), V: 2},
{Ts: minutes(29), V: 2},
// Head
{Ts: minutes(10), V: 3},
{Ts: minutes(20), V: 3},
},
expChunkError: false,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------------------------------------------------------------------------------]
// Chunk 0 [--------]
// Chunk 1 [--------]
// Chunk 2 [-------]
// Chunk 3: Current Head [--------]
// Output Graphically [----------------][-----------------]
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(10), 3),
scenario.sampleFunc(minutes(20), 2),
scenario.sampleFunc(minutes(22), 2),
scenario.sampleFunc(minutes(24), 2),
scenario.sampleFunc(minutes(26), 2),
scenario.sampleFunc(minutes(29), 2),
},
{
scenario.sampleFunc(minutes(30), 1),
scenario.sampleFunc(minutes(32), 1),
scenario.sampleFunc(minutes(34), 1),
scenario.sampleFunc(minutes(36), 1),
scenario.sampleFunc(minutes(40), 0),
scenario.sampleFunc(minutes(42), 0),
scenario.sampleFunc(minutes(44), 0),
scenario.sampleFunc(minutes(46), 0),
scenario.sampleFunc(minutes(50), 0),
},
},
},
{
			name:                 "If chunks do not overlap they are not converged",
queryMinT: minutes(0),
queryMaxT: minutes(100),
firstInOrderSampleAt: minutes(120),
inputSamples: []testValue{
// Chunk 0
{Ts: minutes(10), V: 0},
{Ts: minutes(12), V: 0},
{Ts: minutes(14), V: 0},
{Ts: minutes(16), V: 0},
{Ts: minutes(18), V: 0},
// Chunk 1
{Ts: minutes(20), V: 1},
{Ts: minutes(22), V: 1},
{Ts: minutes(24), V: 1},
{Ts: minutes(26), V: 1},
{Ts: minutes(28), V: 1},
				// Chunk 2
{Ts: minutes(30), V: 2},
{Ts: minutes(32), V: 2},
{Ts: minutes(34), V: 2},
{Ts: minutes(36), V: 2},
{Ts: minutes(38), V: 2},
// Head
{Ts: minutes(40), V: 3},
{Ts: minutes(42), V: 3},
},
expChunkError: false,
expSingleChunks: true,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------------------------------------------------------------------------------]
// Chunk 0 [-------]
// Chunk 1 [-------]
// Chunk 2 [-------]
// Chunk 3: Current Head [-------]
// Output Graphically [-------][-------][-------][--------]
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(10), 0),
scenario.sampleFunc(minutes(12), 0),
scenario.sampleFunc(minutes(14), 0),
scenario.sampleFunc(minutes(16), 0),
scenario.sampleFunc(minutes(18), 0),
},
{
scenario.sampleFunc(minutes(20), 1),
scenario.sampleFunc(minutes(22), 1),
scenario.sampleFunc(minutes(24), 1),
scenario.sampleFunc(minutes(26), 1),
scenario.sampleFunc(minutes(28), 1),
},
{
scenario.sampleFunc(minutes(30), 2),
scenario.sampleFunc(minutes(32), 2),
scenario.sampleFunc(minutes(34), 2),
scenario.sampleFunc(minutes(36), 2),
scenario.sampleFunc(minutes(38), 2),
},
{
scenario.sampleFunc(minutes(40), 3),
scenario.sampleFunc(minutes(42), 3),
},
},
},
{
name: "Triplet of chunks overlapping returns a single merged chunk",
queryMinT: minutes(0),
queryMaxT: minutes(100),
firstInOrderSampleAt: minutes(120),
inputSamples: []testValue{
// Chunk 0
{Ts: minutes(10), V: 0},
{Ts: minutes(15), V: 0},
{Ts: minutes(20), V: 0},
{Ts: minutes(25), V: 0},
{Ts: minutes(30), V: 0},
// Chunk 1
{Ts: minutes(20), V: 1},
{Ts: minutes(25), V: 1},
{Ts: minutes(30), V: 1},
{Ts: minutes(35), V: 1},
{Ts: minutes(42), V: 1},
// Chunk 2 Head
{Ts: minutes(32), V: 2},
{Ts: minutes(50), V: 2},
},
expChunkError: false,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------------------------------------------------------------------------------]
// Chunk 0 [-----------------]
// Chunk 1 [--------------------]
// Chunk 2 Current Head [--------------]
// Output Graphically [-----------------------------------]
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(10), 0),
scenario.sampleFunc(minutes(15), 0),
scenario.sampleFunc(minutes(20), 1),
scenario.sampleFunc(minutes(25), 1),
scenario.sampleFunc(minutes(30), 1),
scenario.sampleFunc(minutes(32), 2),
scenario.sampleFunc(minutes(35), 1),
scenario.sampleFunc(minutes(42), 1),
scenario.sampleFunc(minutes(50), 2),
},
},
},
{
name: "Query interval partially overlaps with a triplet of chunks but still returns a single merged chunk",
queryMinT: minutes(12),
queryMaxT: minutes(33),
firstInOrderSampleAt: minutes(120),
inputSamples: []testValue{
// Chunk 0
{Ts: minutes(10), V: 0},
{Ts: minutes(15), V: 0},
{Ts: minutes(20), V: 0},
{Ts: minutes(25), V: 0},
{Ts: minutes(30), V: 0},
// Chunk 1
{Ts: minutes(20), V: 1},
{Ts: minutes(25), V: 1},
{Ts: minutes(30), V: 1},
{Ts: minutes(35), V: 1},
{Ts: minutes(42), V: 1},
// Chunk 2 Head
{Ts: minutes(32), V: 2},
{Ts: minutes(50), V: 2},
},
expChunkError: false,
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
// Query Interval [------------------]
// Chunk 0 [-----------------]
// Chunk 1 [--------------------]
// Chunk 2 Current Head [--------------]
// Output Graphically [-----------------------------------]
expChunksSamples: []chunks.SampleSlice{
{
scenario.sampleFunc(minutes(10), 0),
scenario.sampleFunc(minutes(15), 0),
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/repair.go | tsdb/repair.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"encoding/json"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
// repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in
// commit 129773b41a565fde5156301e37f9a87158030443.
func repairBadIndexVersion(logger *slog.Logger, dir string) error {
// All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected.
// We must actually set the index file version to 2 and revert the meta.json version back to 1.
dirs, err := blockDirs(dir)
if err != nil {
return fmt.Errorf("list block dirs in %q: %w", dir, err)
}
tmpFiles := make([]string, 0, len(dirs))
defer func() {
for _, tmp := range tmpFiles {
if err := os.RemoveAll(tmp); err != nil {
logger.Error("remove tmp file", "err", err.Error())
}
}
}()
for _, d := range dirs {
meta, err := readBogusMetaFile(d)
if err != nil {
logger.Error("failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err)
continue
}
if meta.Version == metaVersion1 {
logger.Info(
"Found healthy block",
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
)
continue
}
logger.Info(
"Fixing broken block",
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
)
repl, err := os.Create(filepath.Join(d, "index.repaired"))
if err != nil {
return fmt.Errorf("create index.repaired for block dir: %v: %w", d, err)
}
tmpFiles = append(tmpFiles, repl.Name())
broken, err := os.Open(filepath.Join(d, indexFilename))
if err != nil {
return fmt.Errorf("open broken index for block dir: %v: %w", d, err)
}
if _, err := io.Copy(repl, broken); err != nil {
return fmt.Errorf("copy content of index to index.repaired for block dir: %v: %w", d, err)
}
// Set the 5th byte to 2 to indicate the correct file format version.
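		// (The index file begins with a 4-byte magic number followed by a single
		// format-version byte, so offset 4 holds the version.)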
if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
errs := tsdb_errors.NewMulti(
fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err))
if err := repl.Close(); err != nil {
errs.Add(fmt.Errorf("close: %w", err))
}
return errs.Err()
}
if err := repl.Sync(); err != nil {
errs := tsdb_errors.NewMulti(
fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err))
if err := repl.Close(); err != nil {
errs.Add(fmt.Errorf("close: %w", err))
}
return errs.Err()
}
if err := repl.Close(); err != nil {
return fmt.Errorf("close repaired index for block dir: %v: %w", d, err)
}
		if err := broken.Close(); err != nil {
			return fmt.Errorf("close broken index for block dir: %v: %w", d, err)
		}
		if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil {
			return fmt.Errorf("replace broken index with index.repaired for block dir: %v: %w", d, err)
		}
		// Reset version of meta.json to 1.
		meta.Version = metaVersion1
		if _, err := writeMetaFile(logger, d, meta); err != nil {
			return fmt.Errorf("write meta for block dir: %v: %w", d, err)
		}
}
return nil
}
func readBogusMetaFile(dir string) (*BlockMeta, error) {
b, err := os.ReadFile(filepath.Join(dir, metaFilename))
if err != nil {
return nil, err
}
var m BlockMeta
if err := json.Unmarshal(b, &m); err != nil {
return nil, err
}
if m.Version != metaVersion1 && m.Version != 2 {
return nil, fmt.Errorf("unexpected meta file version %d", m.Version)
}
return &m, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/mocks_test.go | tsdb/mocks_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"fmt"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
type mockIndexWriter struct {
seriesChunks []series
}
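// copyChunk returns a deep copy of c. Chunk byte slices are pooled and may be
// reused, so the bytes must be copied before a chunk is retained.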
func copyChunk(c chunkenc.Chunk) (chunkenc.Chunk, error) {
b := c.Bytes()
nb := make([]byte, len(b))
copy(nb, b)
return chunkenc.FromData(c.Encoding(), nb)
}
func (mockIndexWriter) AddSymbol(string) error { return nil }
func (m *mockIndexWriter) AddSeries(_ storage.SeriesRef, l labels.Labels, chks ...chunks.Meta) error {
// Copy chunks as their bytes are pooled.
chksNew := make([]chunks.Meta, len(chks))
for i, chk := range chks {
c, err := copyChunk(chk.Chunk)
if err != nil {
return fmt.Errorf("mockIndexWriter: copy chunk: %w", err)
}
chksNew[i] = chunks.Meta{MaxTime: chk.MaxTime, MinTime: chk.MinTime, Chunk: c}
}
	// By design we don't combine multiple entries for the same series, as `AddSeries` requires the full series to be saved in one call.
m.seriesChunks = append(m.seriesChunks, series{l: l, chunks: chksNew})
return nil
}
func (mockIndexWriter) WriteLabelIndex([]string, []string) error { return nil }
func (mockIndexWriter) Close() error { return nil }
type mockBReader struct {
ir IndexReader
cr ChunkReader
mint int64
maxt int64
}
func (r *mockBReader) Index() (IndexReader, error) { return r.ir, nil }
func (r *mockBReader) Chunks() (ChunkReader, error) { return r.cr, nil }
func (*mockBReader) Tombstones() (tombstones.Reader, error) {
return tombstones.NewMemTombstones(), nil
}
func (r *mockBReader) Meta() BlockMeta { return BlockMeta{MinTime: r.mint, MaxTime: r.maxt} }
func (*mockBReader) Size() int64 { return 0 }
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/isolation_test.go | tsdb/isolation_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"math"
"strconv"
"sync"
"testing"
"github.com/stretchr/testify/require"
)
func TestIsolation(t *testing.T) {
type result struct {
id uint64
lowWatermark uint64
}
var appendA, appendB result
iso := newIsolation(false)
	// Low watermark starts at 0.
require.Equal(t, uint64(0), iso.lowWatermark())
require.Equal(t, int64(math.MaxInt64), iso.lowestAppendTime())
// Pretend we are starting to append.
appendA.id, appendA.lowWatermark = iso.newAppendID(10)
require.Equal(t, result{1, 1}, appendA)
require.Equal(t, uint64(1), iso.lowWatermark())
require.Equal(t, 0, countOpenReads(iso))
require.Equal(t, int64(10), iso.lowestAppendTime())
// Now we start a read.
stateA := iso.State(10, 20)
require.Equal(t, 1, countOpenReads(iso))
// Second appender.
appendB.id, appendB.lowWatermark = iso.newAppendID(20)
require.Equal(t, result{2, 1}, appendB)
require.Equal(t, uint64(1), iso.lowWatermark())
require.Equal(t, int64(10), iso.lowestAppendTime())
iso.closeAppend(appendA.id)
// Low watermark remains at 1 because stateA is still open
require.Equal(t, uint64(1), iso.lowWatermark())
require.Equal(t, 1, countOpenReads(iso))
require.Equal(t, int64(20), iso.lowestAppendTime())
// Finish the read and low watermark should rise.
stateA.Close()
require.Equal(t, uint64(2), iso.lowWatermark())
require.Equal(t, 0, countOpenReads(iso))
iso.closeAppend(appendB.id)
require.Equal(t, uint64(2), iso.lowWatermark())
require.Equal(t, int64(math.MaxInt64), iso.lowestAppendTime())
}
func countOpenReads(iso *isolation) int {
count := 0
iso.TraverseOpenReads(func(*isolationState) bool {
count++
return true
})
return count
}
func BenchmarkIsolation(b *testing.B) {
for _, goroutines := range []int{10, 100, 1000, 10000} {
b.Run(strconv.Itoa(goroutines), func(b *testing.B) {
iso := newIsolation(false)
wg := sync.WaitGroup{}
start := make(chan struct{})
for range goroutines {
wg.Add(1)
go func() {
defer wg.Done()
<-start
for b.Loop() {
appendID, _ := iso.newAppendID(0)
iso.closeAppend(appendID)
}
}()
}
b.ResetTimer()
close(start)
wg.Wait()
})
}
}
func BenchmarkIsolationWithState(b *testing.B) {
for _, goroutines := range []int{10, 100, 1000, 10000} {
b.Run(strconv.Itoa(goroutines), func(b *testing.B) {
iso := newIsolation(false)
wg := sync.WaitGroup{}
start := make(chan struct{})
for range goroutines {
wg.Add(1)
go func() {
defer wg.Done()
<-start
for b.Loop() {
appendID, _ := iso.newAppendID(0)
iso.closeAppend(appendID)
}
}()
}
readers := goroutines / 100
if readers == 0 {
readers++
}
for g := 0; g < readers; g++ {
wg.Add(1)
go func() {
defer wg.Done()
<-start
for b.Loop() {
s := iso.State(math.MinInt64, math.MaxInt64)
s.Close()
}
}()
}
b.ResetTimer()
close(start)
wg.Wait()
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/ooo_isolation.go | tsdb/ooo_isolation.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"container/list"
"sync"
"github.com/prometheus/prometheus/tsdb/chunks"
)
type oooIsolation struct {
mtx sync.RWMutex
openReads *list.List
}
type oooIsolationState struct {
i *oooIsolation
e *list.Element
minRef chunks.ChunkDiskMapperRef
}
func newOOOIsolation() *oooIsolation {
return &oooIsolation{
openReads: list.New(),
}
}
// HasOpenReadsAtOrBefore returns true if this oooIsolation is aware of any reads that use
// chunks with reference at or before ref.
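//
// For example (a sketch): with one read tracked via TrackReadAfter(5), a call
// with ref=6 returns true, while a call with ref=5 returns false.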
func (i *oooIsolation) HasOpenReadsAtOrBefore(ref chunks.ChunkDiskMapperRef) bool {
i.mtx.RLock()
defer i.mtx.RUnlock()
for e := i.openReads.Front(); e != nil; e = e.Next() {
s := e.Value.(*oooIsolationState)
if ref.GreaterThan(s.minRef) {
return true
}
}
return false
}
// TrackReadAfter records a read that uses chunks with reference after minRef.
//
// The caller must ensure that the returned oooIsolationState is eventually closed when
// the read is complete.
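//
// A minimal usage sketch:
//
//	s := i.TrackReadAfter(minRef)
//	defer s.Close()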
func (i *oooIsolation) TrackReadAfter(minRef chunks.ChunkDiskMapperRef) *oooIsolationState {
s := &oooIsolationState{
i: i,
minRef: minRef,
}
i.mtx.Lock()
s.e = i.openReads.PushBack(s)
i.mtx.Unlock()
return s
}
func (s oooIsolationState) Close() {
s.i.mtx.Lock()
s.i.openReads.Remove(s.e)
s.i.mtx.Unlock()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/ooo_head_read.go | tsdb/ooo_head_read.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"slices"
"github.com/oklog/ulid/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
)
var _ IndexReader = &HeadAndOOOIndexReader{}
type HeadAndOOOIndexReader struct {
	*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementations as possible.
inoMint int64
lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}
var _ chunkenc.Iterable = &mergedOOOChunks{}
// mergedOOOChunks holds the list of iterables for overlapping chunks.
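// Its Iterator method chains them into a single, time-ordered sample stream, so
// callers see one sequence even when the underlying chunks overlap.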
type mergedOOOChunks struct {
chunkIterables []chunkenc.Iterable
}
func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
}
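// NewHeadAndOOOIndexReader returns an IndexReader covering both the in-order
// head (read from inoMint) and the out-of-order data, between mint and maxt.
// Chunks at or before lastGarbageCollectedMmapRef are not considered; 0 disables
// that check.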
func NewHeadAndOOOIndexReader(head *Head, inoMint, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
hr := &headIndexReader{
head: head,
mint: mint,
maxt: maxt,
}
return &HeadAndOOOIndexReader{hr, inoMint, lastGarbageCollectedMmapRef}
}
func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
oh.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
builder.Assign(s.labels())
if chks == nil {
return nil
}
s.Lock()
defer s.Unlock()
*chks = (*chks)[:0]
if s.ooo != nil {
return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, oh.inoMint, chks)
}
*chks = appendSeriesChunks(s, oh.inoMint, oh.maxt, *chks)
return nil
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells up to which m-map chunk we may consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, inoMint int64, chks *[]chunks.Meta) error {
tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))
addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
tmpChks = append(tmpChks, chunks.Meta{
MinTime: minT,
MaxTime: maxT,
Ref: ref,
Chunk: chunk,
})
}
// Collect all chunks that overlap the query range.
if s.ooo.oooHeadChunk != nil {
c := s.ooo.oooHeadChunk
if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
			if len(c.chunk.samples) > 0 { // Empty samples happen in tests, at least.
chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
if err != nil {
handleChunkWriteError(err)
return nil
}
for _, chk := range chks {
addChunk(chk.minTime, chk.maxTime, ref, chk.chunk)
}
} else {
var emptyChunk chunkenc.Chunk
addChunk(c.minTime, c.maxTime, ref, emptyChunk)
}
}
}
for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
c := s.ooo.oooMmappedChunks[i]
if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
addChunk(c.minTime, c.maxTime, ref, nil)
}
}
if includeInOrder {
tmpChks = appendSeriesChunks(s, inoMint, maxt, tmpChks)
}
// There is nothing to do if we did not collect any chunk.
if len(tmpChks) == 0 {
return nil
}
// Next we want to sort all the collected chunks by min time so we can find
// those that overlap.
slices.SortFunc(tmpChks, lessByMinTimeAndMinRef)
// Next we want to iterate the sorted collected chunks and return composites for chunks that overlap with others.
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
// In the example 5 overlaps with 7 and 6 overlaps with 8 so we will return
// [5,7], [6,8].
toBeMerged := tmpChks[0]
for _, c := range tmpChks[1:] {
if c.MinTime > toBeMerged.MaxTime {
// This chunk doesn't overlap. Send current toBeMerged to output and start a new one.
*chks = append(*chks, toBeMerged)
toBeMerged = c
} else {
// Merge this chunk with existing toBeMerged.
if mm, ok := toBeMerged.Chunk.(*multiMeta); ok {
mm.metas = append(mm.metas, c)
} else {
toBeMerged.Chunk = &multiMeta{metas: []chunks.Meta{toBeMerged, c}}
}
if toBeMerged.MaxTime < c.MaxTime {
toBeMerged.MaxTime = c.MaxTime
}
}
}
*chks = append(*chks, toBeMerged)
return nil
}
// Fake Chunk object to pass a set of Metas inside Meta.Chunk.
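// Readers unpack it with a type assertion, as chunkOrIterable does below:
//
//	if mm, ok := meta.Chunk.(*multiMeta); ok { /* merge mm.metas */ }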
type multiMeta struct {
chunkenc.Chunk // We don't expect any of the methods to be called.
metas []chunks.Meta
}
// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return label values within either the in-order range or the OOO range.
func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() {
return []string{}, nil
}
if len(matchers) == 0 {
return oh.head.postings.LabelValues(ctx, name, hints), nil
}
return labelValuesWithMatchers(ctx, oh, name, hints, matchers...)
}
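// lessByMinTimeAndMinRef orders chunk metas by MinTime, breaking ties by Ref.
// For example (a sketch of the resulting order):
//
//	{MinTime: 100, Ref: 5} < {MinTime: 100, Ref: 7} < {MinTime: 150, Ref: 1}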
func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
switch {
case a.MinTime < b.MinTime:
return -1
case a.MinTime > b.MinTime:
return 1
}
switch {
case a.Ref < b.Ref:
return -1
case a.Ref > b.Ref:
return 1
default:
return 0
}
}
type HeadAndOOOChunkReader struct {
head *Head
mint, maxt int64
cr *headChunkReader // If nil, only read OOO chunks.
maxMmapRef chunks.ChunkDiskMapperRef
oooIsoState *oooIsolationState
}
func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader {
return &HeadAndOOOChunkReader{
head: head,
mint: mint,
maxt: maxt,
cr: cr,
maxMmapRef: maxMmapRef,
oooIsoState: oooIsoState,
}
}
func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
c, it, _, err := cr.chunkOrIterable(meta, false)
return c, it, err
}
// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special Copy
// behaviour is only implemented for the in-order head chunk.
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
return cr.chunkOrIterable(meta, true)
}
func (cr *HeadAndOOOChunkReader) chunkOrIterable(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
sid, cid, isOOO := unpackHeadChunkRef(meta.Ref)
s := cr.head.series.getByID(sid)
// This means that the series has been garbage collected.
if s == nil {
return nil, nil, 0, storage.ErrNotFound
}
var isoState *isolationState
if cr.cr != nil {
isoState = cr.cr.isoState
}
s.Lock()
defer s.Unlock()
if meta.Chunk == nil {
c, maxt, err := cr.head.chunkFromSeries(s, cid, isOOO, meta.MinTime, meta.MaxTime, isoState, copyLastChunk)
return c, nil, maxt, err
}
mm, ok := meta.Chunk.(*multiMeta)
if !ok { // Complete chunk was supplied.
return meta.Chunk, nil, meta.MaxTime, nil
}
// We have a composite meta: construct a composite iterable.
mc := &mergedOOOChunks{}
for _, m := range mm.metas {
switch {
case m.Chunk != nil:
mc.chunkIterables = append(mc.chunkIterables, m.Chunk)
default:
_, cid, isOOO := unpackHeadChunkRef(m.Ref)
iterable, _, err := cr.head.chunkFromSeries(s, cid, isOOO, m.MinTime, m.MaxTime, isoState, copyLastChunk)
if err != nil {
return nil, nil, 0, fmt.Errorf("invalid head chunk: %w", err)
}
mc.chunkIterables = append(mc.chunkIterables, iterable)
}
}
return nil, mc, meta.MaxTime, nil
}
func (cr *HeadAndOOOChunkReader) Close() error {
if cr.cr != nil && cr.cr.isoState != nil {
cr.cr.isoState.Close()
}
if cr.oooIsoState != nil {
cr.oooIsoState.Close()
}
return nil
}
type OOOCompactionHead struct {
head *Head
lastMmapRef chunks.ChunkDiskMapperRef
lastWBLFile int
postings []storage.SeriesRef
chunkRange int64
mint, maxt int64 // Among all the compactable chunks.
}
// NewOOOCompactionHead does the following:
// 1. M-maps all the in-memory ooo chunks.
// 2. Computes the expected block ranges while iterating through all ooo series and stores them.
// 3. Stores the list of postings having ooo series.
// 4. Cuts a new WBL file for the OOO WBL.
// All the above together have a bit of CPU and memory overhead, and can have a bit of impact
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
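//
// A usage sketch:
//
//	oooHead, err := NewOOOCompactionHead(ctx, head)
//	if err != nil {
//		return err
//	}
//	// Compact using oooHead through its BlockReader methods (Index, Chunks, Meta, ...).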
func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
ch := &OOOCompactionHead{
head: head,
chunkRange: head.chunkRange.Load(),
mint: math.MaxInt64,
maxt: math.MinInt64,
}
if head.wbl != nil {
lastWBLFile, err := head.wbl.NextSegmentSync()
if err != nil {
return nil, err
}
ch.lastWBLFile = lastWBLFile
}
hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt}
n, v := index.AllPostingsKey()
// TODO: filter to series with OOO samples, before sorting.
p, err := hr.Postings(ctx, n, v)
if err != nil {
return nil, err
}
p = hr.SortedPostings(p)
var lastSeq, lastOff int
for p.Next() {
seriesRef := p.At()
ms := head.series.getByID(chunks.HeadSeriesRef(seriesRef))
if ms == nil {
continue
}
// M-map the in-memory chunk and keep track of the last one.
// Also build the block ranges -> series map.
// TODO: consider having a lock specifically for ooo data.
ms.Lock()
if ms.ooo == nil {
ms.Unlock()
continue
}
var lastMmapRef chunks.ChunkDiskMapperRef
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger)
if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
}
if len(mmapRefs) == 0 {
lastMmapRef = 0
} else {
lastMmapRef = mmapRefs[len(mmapRefs)-1]
}
seq, off := lastMmapRef.Unpack()
if seq > lastSeq || (seq == lastSeq && off > lastOff) {
ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
}
if len(ms.ooo.oooMmappedChunks) > 0 {
ch.postings = append(ch.postings, seriesRef)
for _, c := range ms.ooo.oooMmappedChunks {
if c.minTime < ch.mint {
ch.mint = c.minTime
}
if c.maxTime > ch.maxt {
ch.maxt = c.maxTime
}
}
}
ms.Unlock()
}
return ch, nil
}
func (ch *OOOCompactionHead) Index() (IndexReader, error) {
return NewOOOCompactionHeadIndexReader(ch), nil
}
func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil
}
func (*OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
return tombstones.NewMemTombstones(), nil
}
var oooCompactionHeadULID = ulid.MustParse("0000000000XX000COMPACTHEAD")
func (ch *OOOCompactionHead) Meta() BlockMeta {
return BlockMeta{
MinTime: ch.mint,
MaxTime: ch.maxt,
ULID: oooCompactionHeadULID,
Stats: BlockStats{
NumSeries: uint64(len(ch.postings)),
},
}
}
// CloneForTimeRange clones the OOOCompactionHead such that the IndexReader and ChunkReader
// obtained from it only look at the m-map chunks within the given time range while not looking
// beyond ch.lastMmapRef.
// Only the methods of the BlockReader interface are valid for the cloned OOOCompactionHead.
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead {
return &OOOCompactionHead{
head: ch.head,
lastMmapRef: ch.lastMmapRef,
postings: ch.postings,
chunkRange: ch.chunkRange,
mint: mint,
maxt: maxt,
}
}
func (*OOOCompactionHead) Size() int64 { return 0 }
func (ch *OOOCompactionHead) MinTime() int64 { return ch.mint }
func (ch *OOOCompactionHead) MaxTime() int64 { return ch.maxt }
func (ch *OOOCompactionHead) ChunkRange() int64 { return ch.chunkRange }
func (ch *OOOCompactionHead) LastMmapRef() chunks.ChunkDiskMapperRef { return ch.lastMmapRef }
func (ch *OOOCompactionHead) LastWBLFile() int { return ch.lastWBLFile }
type OOOCompactionHeadIndexReader struct {
ch *OOOCompactionHead
}
func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
return &OOOCompactionHeadIndexReader{ch: ch}
}
func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
return hr.Symbols()
}
func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
n, v := index.AllPostingsKey()
if name != n || len(values) != 1 || values[0] != v {
return nil, errors.New("only AllPostingsKey is supported")
}
return index.NewListPostings(ir.ch.postings), nil
}
func (*OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
return index.ErrPostings(errors.New("not supported"))
}
func (*OOOCompactionHeadIndexReader) PostingsForAllLabelValues(context.Context, string) index.Postings {
return index.ErrPostings(errors.New("not supported"))
}
func (*OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings {
// This will already be sorted from the Postings() call above.
return p
}
func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
return hr.ShardedPostings(p, shardIndex, shardCount)
}
func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
ir.ch.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
builder.Assign(s.labels())
s.Lock()
defer s.Unlock()
*chks = (*chks)[:0]
if s.ooo == nil {
return nil
}
return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks)
}
func (*OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
func (*OOOCompactionHeadIndexReader) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
func (*OOOCompactionHeadIndexReader) PostingsForMatchers(context.Context, bool, ...*labels.Matcher) (index.Postings, error) {
return nil, errors.New("not implemented")
}
func (*OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
func (*OOOCompactionHeadIndexReader) LabelNamesFor(context.Context, index.Postings) ([]string, error) {
return nil, errors.New("not implemented")
}
func (*OOOCompactionHeadIndexReader) Close() error {
return nil
}
// HeadAndOOOQuerier queries both the head and the out-of-order head.
type HeadAndOOOQuerier struct {
mint, maxt int64
head *Head
index IndexReader
chunkr ChunkReader
	querier storage.Querier // Used for LabelNames and LabelValues, but may be nil if the head was truncated in the meantime, in which case we ignore it and do not close it at the end.
}
func NewHeadAndOOOQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
cr := &headChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: head.iso.State(mint, maxt),
}
return &HeadAndOOOQuerier{
mint: mint,
maxt: maxt,
head: head,
index: NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef),
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
querier: querier,
}
}
func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if q.querier == nil {
return nil, nil, nil
}
return q.querier.LabelValues(ctx, name, hints, matchers...)
}
func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if q.querier == nil {
return nil, nil, nil
}
return q.querier.LabelNames(ctx, hints, matchers...)
}
func (q *HeadAndOOOQuerier) Close() error {
q.chunkr.Close()
if q.querier == nil {
return nil
}
return q.querier.Close()
}
func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}
// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
type HeadAndOOOChunkQuerier struct {
mint, maxt int64
head *Head
index IndexReader
chunkr ChunkReader
querier storage.ChunkQuerier
}
func NewHeadAndOOOChunkQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
cr := &headChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: head.iso.State(mint, maxt),
}
return &HeadAndOOOChunkQuerier{
mint: mint,
maxt: maxt,
head: head,
index: NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef),
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
querier: querier,
}
}
func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if q.querier == nil {
return nil, nil, nil
}
return q.querier.LabelValues(ctx, name, hints, matchers...)
}
func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if q.querier == nil {
return nil, nil, nil
}
return q.querier.LabelNames(ctx, hints, matchers...)
}
func (q *HeadAndOOOChunkQuerier) Close() error {
q.chunkr.Close()
if q.querier == nil {
return nil
}
return q.querier.Close()
}
func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/db_test.go | tsdb/db_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"errors"
"flag"
"fmt"
"hash/crc32"
"log/slog"
"math"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"sync"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/oklog/ulid/v2"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
var isolationEnabled bool
flag.BoolVar(&isolationEnabled, "test.tsdb-isolation", true, "enable isolation")
flag.Parse()
defaultIsolationDisabled = !isolationEnabled
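	// The isolation setting can be toggled per run, e.g. (a sketch):
	//
	//	go test ./tsdb -args -test.tsdb-isolation=false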
goleak.VerifyTestMain(m,
goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"),
goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"),
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
}
type testDBOptions struct {
dir string
opts *Options
rngs []int64
}
type testDBOpt func(o *testDBOptions)
func withDir(dir string) testDBOpt {
return func(o *testDBOptions) {
o.dir = dir
}
}
func withOpts(opts *Options) testDBOpt {
return func(o *testDBOptions) {
o.opts = opts
}
}
func withRngs(rngs ...int64) testDBOpt {
return func(o *testDBOptions) {
o.rngs = rngs
}
}
func newTestDB(t testing.TB, opts ...testDBOpt) (db *DB) {
var o testDBOptions
for _, opt := range opts {
opt(&o)
}
if o.opts == nil {
o.opts = DefaultOptions()
}
if o.dir == "" {
o.dir = t.TempDir()
}
var err error
if len(o.rngs) == 0 {
db, err = Open(o.dir, nil, nil, o.opts, nil)
} else {
o.opts, o.rngs = validateOpts(o.opts, o.rngs)
db, err = open(o.dir, nil, nil, o.opts, o.rngs, nil)
}
require.NoError(t, err)
t.Cleanup(func() {
// Always close. DB is safe for close-after-close.
require.NoError(t, db.Close())
})
return db
}
func TestDBClose_AfterClose(t *testing.T) {
db := newTestDB(t)
require.NoError(t, db.Close())
require.NoError(t, db.Close())
	// Double-check that we are closing the correct DB after reuse.
db = newTestDB(t)
require.NoError(t, db.Close())
require.NoError(t, db.Close())
}
// query runs a matcher query against the querier and fully expands its data.
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]chunks.Sample {
ss := q.Select(context.Background(), false, nil, matchers...)
defer func() {
require.NoError(t, q.Close())
}()
var it chunkenc.Iterator
result := map[string][]chunks.Sample{}
for ss.Next() {
series := ss.At()
it = series.Iterator(it)
samples, err := storage.ExpandSamples(it, newSample)
require.NoError(t, err)
require.NoError(t, it.Err())
if len(samples) == 0 {
continue
}
name := series.Labels().String()
result[name] = samples
}
require.NoError(t, ss.Err())
require.Empty(t, ss.Warnings())
return result
}
// queryAndExpandChunks runs a matcher query against the querier and fully expands its data into samples.
func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]chunks.Sample {
s := queryChunks(t, q, matchers...)
res := make(map[string][][]chunks.Sample)
for k, v := range s {
var samples [][]chunks.Sample
for _, chk := range v {
sam, err := storage.ExpandSamples(chk.Chunk.Iterator(nil), nil)
require.NoError(t, err)
samples = append(samples, sam)
}
res[k] = samples
}
return res
}
// queryChunks runs a matcher query against the querier and expands its data.
func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][]chunks.Meta {
ss := q.Select(context.Background(), false, nil, matchers...)
defer func() {
require.NoError(t, q.Close())
}()
var it chunks.Iterator
result := map[string][]chunks.Meta{}
for ss.Next() {
series := ss.At()
chks := []chunks.Meta{}
it = series.Iterator(it)
for it.Next() {
chks = append(chks, it.At())
}
require.NoError(t, it.Err())
if len(chks) == 0 {
continue
}
name := series.Labels().String()
result[name] = chks
}
require.NoError(t, ss.Err())
require.Empty(t, ss.Warnings())
return result
}
// Ensure that blocks are held in memory in their time order
// and not in ULID order as they are read from the directory.
func TestDB_reloadOrder(t *testing.T) {
db := newTestDB(t)
metas := []BlockMeta{
{MinTime: 90, MaxTime: 100},
{MinTime: 70, MaxTime: 80},
{MinTime: 100, MaxTime: 110},
}
for _, m := range metas {
createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
}
require.NoError(t, db.reloadBlocks())
blocks := db.Blocks()
require.Len(t, blocks, 3)
require.Equal(t, metas[1].MinTime, blocks[0].Meta().MinTime)
require.Equal(t, metas[1].MaxTime, blocks[0].Meta().MaxTime)
require.Equal(t, metas[0].MinTime, blocks[1].Meta().MinTime)
require.Equal(t, metas[0].MaxTime, blocks[1].Meta().MaxTime)
require.Equal(t, metas[2].MinTime, blocks[2].Meta().MinTime)
require.Equal(t, metas[2].MaxTime, blocks[2].Meta().MaxTime)
}
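// TestDataAvailableOnlyAfterCommit verifies appender isolation: samples that
// have been appended but not yet committed are invisible to queriers.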
func TestDataAvailableOnlyAfterCommit(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
require.NoError(t, err)
querier, err := db.Querier(0, 1)
require.NoError(t, err)
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
err = app.Commit()
require.NoError(t, err)
querier, err = db.Querier(0, 1)
require.NoError(t, err)
defer querier.Close()
seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet)
}
// TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic.
// https://github.com/prometheus/prometheus/issues/7548
func TestNoPanicAfterWALCorruption(t *testing.T) {
db := newTestDB(t, withOpts(&Options{WALSegmentSize: 32 * 1024}))
// Append until the first mmapped head chunk.
// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
var expSamples []chunks.Sample
var maxt int64
ctx := context.Background()
{
// Appending 121 samples because on the 121st a new chunk will be created.
for range 121 {
app := db.Appender(ctx)
_, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0)
expSamples = append(expSamples, sample{t: maxt, f: 0})
require.NoError(t, err)
require.NoError(t, app.Commit())
maxt++
}
require.NoError(t, db.Close())
}
// Corrupt the WAL after the first sample of the series so that it has at least one sample and
// it is not garbage collected.
	// The repair deletes all WAL records after the corrupted record; those samples are then read from the mmapped chunk instead.
{
walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
require.NoError(t, err)
f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
require.NoError(t, err)
r := wlog.NewReader(bufio.NewReader(f))
require.True(t, r.Next(), "reading the series record")
require.True(t, r.Next(), "reading the first sample record")
		// Write an invalid record header to corrupt everything after the first WAL sample.
_, err = f.WriteAt([]byte{99}, r.Offset())
require.NoError(t, err)
f.Close()
}
// Query the data.
{
db := newTestDB(t, withDir(db.Dir()))
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal), "WAL corruption count mismatch")
querier, err := db.Querier(0, maxt)
require.NoError(t, err)
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "", ""))
// The last sample should be missing as it was after the WAL segment corruption.
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet)
}
}
func TestDataNotAvailableAfterRollback(t *testing.T) {
db := newTestDB(t)
app := db.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0)
require.NoError(t, err)
_, err = app.AppendHistogram(
0, labels.FromStrings("type", "histogram"), 0,
&histogram.Histogram{Count: 42, Sum: math.NaN()}, nil,
)
require.NoError(t, err)
_, err = app.AppendHistogram(
0, labels.FromStrings("type", "floathistogram"), 0,
nil, &histogram.FloatHistogram{Count: 42, Sum: math.NaN()},
)
require.NoError(t, err)
err = app.Rollback()
require.NoError(t, err)
for _, typ := range []string{"float", "histogram", "floathistogram"} {
querier, err := db.Querier(0, 1)
require.NoError(t, err)
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "type", typ))
require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
}
sr, err := wlog.NewSegmentsReader(db.head.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
}
}
// Check that only series get stored after calling Rollback.
require.Equal(t, 3, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}
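// TestDBAppenderAddRef exercises series references: a ref returned by Append
// remains valid within the same appender, across commits, and in later
// appenders, while an unknown ref with empty labels fails with ErrInvalidSample.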
func TestDBAppenderAddRef(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app1 := db.Appender(ctx)
ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 123, 0)
require.NoError(t, err)
// Reference should already work before commit.
ref2, err := app1.Append(ref1, labels.EmptyLabels(), 124, 1)
require.NoError(t, err)
require.Equal(t, ref1, ref2)
err = app1.Commit()
require.NoError(t, err)
app2 := db.Appender(ctx)
	// First ref should already work in the next transaction.
ref3, err := app2.Append(ref1, labels.EmptyLabels(), 125, 0)
require.NoError(t, err)
require.Equal(t, ref1, ref3)
ref4, err := app2.Append(ref1, labels.FromStrings("a", "b"), 133, 1)
require.NoError(t, err)
require.Equal(t, ref1, ref4)
// Reference must be valid to add another sample.
ref5, err := app2.Append(ref2, labels.EmptyLabels(), 143, 2)
require.NoError(t, err)
require.Equal(t, ref1, ref5)
// Missing labels & invalid refs should fail.
_, err = app2.Append(9999999, labels.EmptyLabels(), 1, 1)
require.ErrorIs(t, err, ErrInvalidSample)
require.NoError(t, app2.Commit())
q, err := db.Querier(0, 200)
require.NoError(t, err)
res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
labels.FromStrings("a", "b").String(): {
sample{t: 123, f: 0},
sample{t: 124, f: 1},
sample{t: 125, f: 0},
sample{t: 133, f: 1},
sample{t: 143, f: 2},
},
}, res)
}
func TestAppendEmptyLabelsIgnored(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app1 := db.Appender(ctx)
ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 123, 0)
require.NoError(t, err)
	// Add with an empty label value.
ref2, err := app1.Append(0, labels.FromStrings("a", "b", "c", ""), 124, 0)
require.NoError(t, err)
// Should be the same series.
require.Equal(t, ref1, ref2)
err = app1.Commit()
require.NoError(t, err)
}
func TestDeleteSimple(t *testing.T) {
const numSamples int64 = 10
cases := []struct {
Intervals tombstones.Intervals
remaint []int64
}{
{
Intervals: tombstones.Intervals{{Mint: 0, Maxt: 3}},
remaint: []int64{4, 5, 6, 7, 8, 9},
},
{
Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}},
remaint: []int64{0, 4, 5, 6, 7, 8, 9},
},
{
Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
remaint: []int64{0, 8, 9},
},
{
Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
remaint: []int64{0},
},
{ // This case is to ensure that labels and symbols are deleted.
Intervals: tombstones.Intervals{{Mint: 0, Maxt: 9}},
remaint: []int64{},
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
smpls := make([]float64, numSamples)
for i := range numSamples {
smpls[i] = rand.Float64()
app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
}
require.NoError(t, app.Commit())
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
for _, r := range c.Intervals {
require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
}
// Compare the result.
q, err := db.Querier(0, numSamples)
require.NoError(t, err)
res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
})
for {
eok, rok := expss.Next(), res.Next()
require.Equal(t, eok, rok)
if !eok {
require.Empty(t, res.Warnings())
break
}
sexp := expss.At()
sres := res.At()
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
})
}
}
func TestAmendHistogramDatapointCausesError(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.Appender(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0)
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 1)
require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp)
require.NoError(t, app.Rollback())
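	// The histogram below uses delta-encoded PositiveBuckets: the absolute bucket
	// counts are 1, 3, 1, 2, 1, 1, 1 (sum 10), which together with ZeroCount 42
	// add up to Count 52. The two spans cover exactly those 7 buckets.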
h := histogram.Histogram{
Schema: 3,
Count: 52,
Sum: 2.7,
ZeroThreshold: 0.1,
ZeroCount: 42,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 10, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
}
fh := h.ToFloat(nil)
app = db.Appender(ctx)
_, err = app.AppendHistogram(0, labels.FromStrings("a", "c"), 0, h.Copy(), nil)
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.Appender(ctx)
_, err = app.AppendHistogram(0, labels.FromStrings("a", "c"), 0, h.Copy(), nil)
require.NoError(t, err)
h.Schema = 2
_, err = app.AppendHistogram(0, labels.FromStrings("a", "c"), 0, h.Copy(), nil)
require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
require.NoError(t, app.Rollback())
// Float histogram.
app = db.Appender(ctx)
_, err = app.AppendHistogram(0, labels.FromStrings("a", "d"), 0, nil, fh.Copy())
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.Appender(ctx)
_, err = app.AppendHistogram(0, labels.FromStrings("a", "d"), 0, nil, fh.Copy())
require.NoError(t, err)
fh.Schema = 2
_, err = app.AppendHistogram(0, labels.FromStrings("a", "d"), 0, nil, fh.Copy())
require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
require.NoError(t, app.Rollback())
}
func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, math.NaN())
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.Appender(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, math.NaN())
require.NoError(t, err)
}
func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
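	// 0x7ff0000000000001 and 0x7ff0000000000002 are two distinct NaN bit patterns
	// (same exponent bits, different payloads). Duplicate detection compares raw
	// float64 bits, so the identical NaNs in the test above are accepted while
	// these differing payloads at the same timestamp are rejected.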
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, math.Float64frombits(0x7ff0000000000001))
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.Appender(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, math.Float64frombits(0x7ff0000000000002))
require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp)
}
func TestEmptyLabelsetCausesError(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
_, err := app.Append(0, labels.Labels{}, 0, 0)
require.Error(t, err)
require.Equal(t, "empty labelset: invalid sample", err.Error())
}
func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
db := newTestDB(t)
	// Append an amended value (same timestamp, different value).
ctx := context.Background()
app := db.Appender(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 1)
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 2)
require.NoError(t, err)
require.NoError(t, app.Commit())
// Make sure the right value is stored.
q, err := db.Querier(0, 10)
require.NoError(t, err)
ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
}, ssMap)
	// Append an out-of-order value.
app = db.Appender(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 10, 3)
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("a", "b"), 7, 5)
require.NoError(t, err)
require.NoError(t, app.Commit())
q, err = db.Querier(0, 10)
require.NoError(t, err)
ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
}, ssMap)
}
func TestDB_Snapshot(t *testing.T) {
db := newTestDB(t)
// append data
ctx := context.Background()
app := db.Appender(ctx)
mint := int64(1414141414000)
for i := range 1000 {
_, err := app.Append(0, labels.FromStrings("foo", "bar"), mint+int64(i), 1.0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// create snapshot
snap := t.TempDir()
require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close())
// reopen DB from snapshot
db = newTestDB(t, withDir(snap))
querier, err := db.Querier(mint, mint+1000)
require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }()
// sum values
seriesSet := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
var series chunkenc.Iterator
sum := 0.0
for seriesSet.Next() {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
_, v := series.At()
sum += v
}
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Empty(t, seriesSet.Warnings())
require.Equal(t, 1000.0, sum)
}
// TestDB_Snapshot_ChunksOutsideOfCompactedRange ensures that a snapshot removes chunk samples
// that are outside the set block time range.
// See https://github.com/prometheus/prometheus/issues/5105
func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
mint := int64(1414141414000)
for i := range 1000 {
_, err := app.Append(0, labels.FromStrings("foo", "bar"), mint+int64(i), 1.0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
snap := t.TempDir()
	// Hackily introduce a "race" by setting a lower max time than the maxTime of the last chunk.
db.head.maxTime.Sub(10)
require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close())
// reopen DB from snapshot
db = newTestDB(t, withDir(snap))
querier, err := db.Querier(mint, mint+1000)
require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }()
// Sum values.
seriesSet := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
var series chunkenc.Iterator
sum := 0.0
for seriesSet.Next() {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
_, v := series.At()
sum += v
}
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Empty(t, seriesSet.Warnings())
	// Since we snapshotted with MaxTime - 10, expect 10 fewer samples.
require.Equal(t, 1000.0-10, sum)
}
func TestDB_SnapshotWithDelete(t *testing.T) {
const numSamples int64 = 10
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
smpls := make([]float64, numSamples)
for i := range numSamples {
smpls[i] = rand.Float64()
app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
}
require.NoError(t, app.Commit())
cases := []struct {
intervals tombstones.Intervals
remaint []int64
}{
{
intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
remaint: []int64{0, 8, 9},
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
for _, r := range c.intervals {
require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
}
// create snapshot
snap := t.TempDir()
require.NoError(t, db.Snapshot(snap, true))
// reopen DB from snapshot
db := newTestDB(t, withDir(snap))
// Compare the result.
q, err := db.Querier(0, numSamples)
require.NoError(t, err)
defer func() { require.NoError(t, q.Close()) }()
res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
})
if len(expSamples) == 0 {
require.False(t, res.Next())
return
}
for {
eok, rok := expss.Next(), res.Next()
require.Equal(t, eok, rok)
if !eok {
require.Empty(t, res.Warnings())
break
}
sexp := expss.At()
sres := res.At()
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
})
}
}
func TestDB_e2e(t *testing.T) {
const (
numDatapoints = 1000
numRanges = 1000
timeInterval = int64(3)
)
// Create 8 series with 1000 data-points of different ranges and run queries.
lbls := [][]labels.Label{
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prom-k8s"},
},
}
seriesMap := map[string][]chunks.Sample{}
for _, l := range lbls {
seriesMap[labels.New(l...).String()] = []chunks.Sample{}
}
db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
for _, l := range lbls {
lset := labels.New(l...)
series := []chunks.Sample{}
ts := rand.Int63n(300)
for range numDatapoints {
v := rand.Float64()
series = append(series, sample{ts, v, nil, nil})
_, err := app.Append(0, lset, ts, v)
require.NoError(t, err)
ts += rand.Int63n(timeInterval) + 1
}
seriesMap[lset.String()] = series
}
require.NoError(t, app.Commit())
// Query each selector on 1000 random time-ranges.
queries := []struct {
ms []*labels.Matcher
}{
{
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
},
{
ms: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
},
},
{
ms: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
},
},
// TODO: Add Regexp Matchers.
}
for _, qry := range queries {
matched := labels.Slice{}
for _, l := range lbls {
s := labels.Selector(qry.ms)
ls := labels.New(l...)
if s.Matches(ls) {
matched = append(matched, ls)
}
}
sort.Sort(matched)
for range numRanges {
mint := rand.Int63n(300)
maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
expected := map[string][]chunks.Sample{}
// Build the mockSeriesSet.
for _, m := range matched {
smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
if len(smpls) > 0 {
expected[m.String()] = smpls
}
}
q, err := db.Querier(mint, maxt)
require.NoError(t, err)
ss := q.Select(ctx, false, nil, qry.ms...)
result := map[string][]chunks.Sample{}
for ss.Next() {
x := ss.At()
smpls, err := storage.ExpandSamples(x.Iterator(nil), newSample)
require.NoError(t, err)
if len(smpls) > 0 {
result[x.Labels().String()] = smpls
}
}
require.NoError(t, ss.Err())
require.Empty(t, ss.Warnings())
require.Equal(t, expected, result)
q.Close()
}
}
}
func TestWALFlushedOnDBClose(t *testing.T) {
db := newTestDB(t)
lbls := labels.FromStrings("labelname", "labelvalue")
ctx := context.Background()
app := db.Appender(ctx)
_, err := app.Append(0, lbls, 0, 1)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.NoError(t, db.Close())
db = newTestDB(t, withDir(db.Dir()))
q, err := db.Querier(0, 1)
require.NoError(t, err)
values, ws, err := q.LabelValues(ctx, "labelname", nil)
require.NoError(t, err)
require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
}
func TestWALSegmentSizeOptions(t *testing.T) {
tests := map[int]func(dbdir string, segmentSize int){
		// Default WAL size.
0: func(dbDir string, _ int) {
filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err)
files := []os.FileInfo{}
for _, f := range filesAndDir {
if !f.IsDir() {
fi, err := f.Info()
require.NoError(t, err)
files = append(files, fi)
}
}
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
}
lastFile := files[len(files)-1]
require.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
		// Custom WAL size.
2 * 32 * 1024: func(dbDir string, segmentSize int) {
filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err)
files := []os.FileInfo{}
for _, f := range filesAndDir {
if !f.IsDir() {
fi, err := f.Info()
require.NoError(t, err)
files = append(files, fi)
}
}
require.NotEmpty(t, files, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
}
lastFile := files[len(files)-1]
require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
		// WAL disabled.
-1: func(dbDir string, _ int) {
// Check that WAL dir is not there.
_, err := os.Stat(filepath.Join(dbDir, "wal"))
require.Error(t, err)
			// Check that the chunks dir exists.
_, err = os.Stat(mmappedChunksDir(dbDir))
require.NoError(t, err)
},
}
for segmentSize, testFunc := range tests {
t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
opts := DefaultOptions()
opts.WALSegmentSize = segmentSize
db := newTestDB(t, withOpts(opts))
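			// Each iteration registers a new series and appends 79 samples to it; across
			// 155 iterations this writes enough WAL data to roll over multiple segments
			// at the small custom segment size tested above.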
for i := range int64(155) {
app := db.Appender(context.Background())
ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64())
require.NoError(t, err)
for j := int64(1); j <= 78; j++ {
_, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64())
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
require.NoError(t, db.Close())
testFunc(db.Dir(), opts.WALSegmentSize)
})
}
}
// https://github.com/prometheus/prometheus/issues/9846
// https://github.com/prometheus/prometheus/issues/9859
func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) {
const (
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/example_test.go | tsdb/example_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"fmt"
"math"
"os"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
func Example() {
// Create a random dir to work in. Open() doesn't require a pre-existing dir, but
// we want to make sure not to make a mess where we shouldn't.
dir, err := os.MkdirTemp("", "tsdb-test")
noErr(err)
// Open a TSDB for reading and/or writing.
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
noErr(err)
// Open an appender for writing.
app := db.Appender(context.Background())
series := labels.FromStrings("foo", "bar")
// Ref is 0 for the first append since we don't know the reference for the series.
ref, err := app.Append(0, series, time.Now().Unix(), 123)
noErr(err)
// Another append for a second later.
	// Reusing the ref from above, since it's the same series, makes the append faster.
time.Sleep(time.Second)
_, err = app.Append(ref, series, time.Now().Unix(), 124)
noErr(err)
// Commit to storage.
err = app.Commit()
noErr(err)
// In case you want to do more appends after app.Commit(),
// you need a new appender.
app = db.Appender(context.Background())
// ... adding more samples.
// Open a querier for reading.
querier, err := db.Querier(math.MinInt64, math.MaxInt64)
noErr(err)
ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
for ss.Next() {
series := ss.At()
fmt.Println("series:", series.Labels().String())
it := series.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
			_, v := it.At() // We ignore the timestamp here to keep the output predictable for the test below.
fmt.Println("sample", v)
}
fmt.Println("it.Err():", it.Err())
}
fmt.Println("ss.Err():", ss.Err())
ws := ss.Warnings()
if len(ws) > 0 {
fmt.Println("warnings:", ws)
}
err = querier.Close()
noErr(err)
// Clean up any last resources when done.
err = db.Close()
noErr(err)
err = os.RemoveAll(dir)
noErr(err)
// Output:
// series: {foo="bar"}
// sample 123
// sample 124
// it.Err(): <nil>
// ss.Err(): <nil>
}
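// Note that the example appends with time.Now().Unix(), i.e. seconds.
// Prometheus TSDB timestamps are conventionally milliseconds, but the storage
// itself does not enforce a unit, and the output above only asserts on values.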
func noErr(err error) {
if err != nil {
panic(err)
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/db_append_v2_test.go | tsdb/db_append_v2_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"bufio"
"context"
"fmt"
"log/slog"
"math"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"testing"
"time"
"github.com/oklog/ulid/v2"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
)
// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632):
// * TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks
// * TestChunkQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks
// * TestEmptyLabelsetCausesError
// * TestQueryHistogramFromBlocksWithCompaction
// TODO(krajorama): Add histograms test cases.
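// The AppenderV2 tests below use a single unified Append call,
// Append(ref, labels, ct, t, v, h, fh, opts): h and fh select histogram or
// float-histogram samples and are nil for plain float samples, while the third
// argument appears to be a created timestamp (0 = unset). All tests here pass
// zero-valued storage.AOptions.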
func TestDataAvailableOnlyAfterCommit_AppendV2(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
querier, err := db.Querier(0, 1)
require.NoError(t, err)
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
err = app.Commit()
require.NoError(t, err)
querier, err = db.Querier(0, 1)
require.NoError(t, err)
defer querier.Close()
seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet)
}
// TestNoPanicAfterWALCorruption_AppendV2 ensures that querying the db after a WAL corruption doesn't cause a panic.
// https://github.com/prometheus/prometheus/issues/7548
func TestNoPanicAfterWALCorruption_AppendV2(t *testing.T) {
db := newTestDB(t, withOpts(&Options{WALSegmentSize: 32 * 1024}))
// Append until the first mmapped head chunk.
// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
var expSamples []chunks.Sample
var maxt int64
ctx := context.Background()
{
// Appending 121 samples because on the 121st a new chunk will be created.
for range 121 {
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, maxt, 0, nil, nil, storage.AOptions{})
expSamples = append(expSamples, sample{t: maxt, f: 0})
require.NoError(t, err)
require.NoError(t, app.Commit())
maxt++
}
require.NoError(t, db.Close())
}
// Corrupt the WAL after the first sample of the series so that it has at least one sample and
// it is not garbage collected.
	// The repair deletes all WAL records after the corrupted record; those samples are then read from the mmapped chunk instead.
{
walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
require.NoError(t, err)
f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
require.NoError(t, err)
r := wlog.NewReader(bufio.NewReader(f))
require.True(t, r.Next(), "reading the series record")
require.True(t, r.Next(), "reading the first sample record")
		// Write an invalid record header to corrupt everything after the first WAL sample.
_, err = f.WriteAt([]byte{99}, r.Offset())
require.NoError(t, err)
f.Close()
}
// Query the data.
{
db := newTestDB(t, withDir(db.Dir()))
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal), "WAL corruption count mismatch")
querier, err := db.Querier(0, maxt)
require.NoError(t, err)
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "", ""))
// The last sample should be missing as it was after the WAL segment corruption.
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet)
}
}
func TestDataNotAvailableAfterRollback_AppendV2(t *testing.T) {
db := newTestDB(t)
app := db.AppenderV2(context.Background())
_, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
_, err = app.Append(
0, labels.FromStrings("type", "histogram"), 0, 0, 0,
&histogram.Histogram{Count: 42, Sum: math.NaN()}, nil,
storage.AOptions{},
)
require.NoError(t, err)
_, err = app.Append(
0, labels.FromStrings("type", "floathistogram"), 0, 0, 0,
nil, &histogram.FloatHistogram{Count: 42, Sum: math.NaN()},
storage.AOptions{},
)
require.NoError(t, err)
err = app.Rollback()
require.NoError(t, err)
for _, typ := range []string{"float", "histogram", "floathistogram"} {
querier, err := db.Querier(0, 1)
require.NoError(t, err)
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "type", typ))
require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
}
sr, err := wlog.NewSegmentsReader(db.head.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
}
}
// Check that only series get stored after calling Rollback.
require.Equal(t, 3, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}
func TestDBAppenderV2_AddRef(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app1 := db.AppenderV2(ctx)
ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 0, 123, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
// Reference should already work before commit.
ref2, err := app1.Append(ref1, labels.EmptyLabels(), 0, 124, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.Equal(t, ref1, ref2)
err = app1.Commit()
require.NoError(t, err)
app2 := db.AppenderV2(ctx)
	// First ref should already work in the next transaction.
ref3, err := app2.Append(ref1, labels.EmptyLabels(), 0, 125, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.Equal(t, ref1, ref3)
ref4, err := app2.Append(ref1, labels.FromStrings("a", "b"), 0, 133, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.Equal(t, ref1, ref4)
// Reference must be valid to add another sample.
ref5, err := app2.Append(ref2, labels.EmptyLabels(), 0, 143, 2, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.Equal(t, ref1, ref5)
// Missing labels & invalid refs should fail.
_, err = app2.Append(9999999, labels.EmptyLabels(), 0, 1, 1, nil, nil, storage.AOptions{})
require.ErrorIs(t, err, ErrInvalidSample)
require.NoError(t, app2.Commit())
q, err := db.Querier(0, 200)
require.NoError(t, err)
res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
labels.FromStrings("a", "b").String(): {
sample{t: 123, f: 0},
sample{t: 124, f: 1},
sample{t: 125, f: 0},
sample{t: 133, f: 1},
sample{t: 143, f: 2},
},
}, res)
}
func TestDBAppenderV2_EmptyLabelsIgnored(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app1 := db.AppenderV2(ctx)
ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 0, 123, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
	// Add with an empty label value.
ref2, err := app1.Append(0, labels.FromStrings("a", "b", "c", ""), 0, 124, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
// Should be the same series.
require.Equal(t, ref1, ref2)
err = app1.Commit()
require.NoError(t, err)
}
func TestDBAppenderV2_EmptyLabelsetCausesError(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.Labels{}, 0, 0, 0, nil, nil, storage.AOptions{})
require.Error(t, err)
require.Equal(t, "empty labelset: invalid sample", err.Error())
}
func TestDeleteSimple_AppendV2(t *testing.T) {
const numSamples int64 = 10
cases := []struct {
Intervals tombstones.Intervals
remaint []int64
}{
{
Intervals: tombstones.Intervals{{Mint: 0, Maxt: 3}},
remaint: []int64{4, 5, 6, 7, 8, 9},
},
{
Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}},
remaint: []int64{0, 4, 5, 6, 7, 8, 9},
},
{
Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
remaint: []int64{0, 8, 9},
},
{
Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
remaint: []int64{0},
},
{ // This case is to ensure that labels and symbols are deleted.
Intervals: tombstones.Intervals{{Mint: 0, Maxt: 9}},
remaint: []int64{},
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
smpls := make([]float64, numSamples)
for i := range numSamples {
smpls[i] = rand.Float64()
app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
}
require.NoError(t, app.Commit())
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
for _, r := range c.Intervals {
require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
}
// Compare the result.
q, err := db.Querier(0, numSamples)
require.NoError(t, err)
res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
})
for {
eok, rok := expss.Next(), res.Next()
require.Equal(t, eok, rok)
if !eok {
require.Empty(t, res.Warnings())
break
}
sexp := expss.At()
sres := res.At()
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
})
}
}
func TestAmendHistogramDatapointCausesError_AppendV2(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, 1, nil, nil, storage.AOptions{})
require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp)
require.NoError(t, app.Rollback())
h := histogram.Histogram{
Schema: 3,
Count: 52,
Sum: 2.7,
ZeroThreshold: 0.1,
ZeroCount: 42,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 10, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
}
fh := h.ToFloat(nil)
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "c"), 0, 0, 0, h.Copy(), nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "c"), 0, 0, 0, h.Copy(), nil, storage.AOptions{})
require.NoError(t, err)
h.Schema = 2
_, err = app.Append(0, labels.FromStrings("a", "c"), 0, 0, 0, h.Copy(), nil, storage.AOptions{})
require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
require.NoError(t, app.Rollback())
// Float histogram.
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "d"), 0, 0, 0, nil, fh.Copy(), storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "d"), 0, 0, 0, nil, fh.Copy(), storage.AOptions{})
require.NoError(t, err)
fh.Schema = 2
_, err = app.Append(0, labels.FromStrings("a", "d"), 0, 0, 0, nil, fh.Copy(), storage.AOptions{})
require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
require.NoError(t, app.Rollback())
}
func TestDuplicateNaNDatapointNoAmendError_AppendV2(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.NaN(), nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.NaN(), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
func TestNonDuplicateNaNDatapointsCausesAmendError_AppendV2(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.Float64frombits(0x7ff0000000000001), nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.Float64frombits(0x7ff0000000000002), nil, nil, storage.AOptions{})
require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp)
}
func TestSkippingInvalidValuesInSameTxn_AppendV2(t *testing.T) {
db := newTestDB(t)
	// Append an amended value (same timestamp, different value).
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, 2, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
// Make sure the right value is stored.
q, err := db.Querier(0, 10)
require.NoError(t, err)
ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
}, ssMap)
	// Append an out-of-order value.
app = db.AppenderV2(ctx)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 10, 3, nil, nil, storage.AOptions{})
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 7, 5, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
q, err = db.Querier(0, 10)
require.NoError(t, err)
ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
}, ssMap)
}
func TestDB_Snapshot_AppendV2(t *testing.T) {
db := newTestDB(t)
// append data
ctx := context.Background()
app := db.AppenderV2(ctx)
mint := int64(1414141414000)
for i := range 1000 {
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, mint+int64(i), 1.0, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// create snapshot
snap := t.TempDir()
require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close())
// reopen DB from snapshot
db = newTestDB(t, withDir(snap))
querier, err := db.Querier(mint, mint+1000)
require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }()
// sum values
seriesSet := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
var series chunkenc.Iterator
sum := 0.0
for seriesSet.Next() {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
_, v := series.At()
sum += v
}
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Empty(t, seriesSet.Warnings())
require.Equal(t, 1000.0, sum)
}
// TestDB_Snapshot_ChunksOutsideOfCompactedRange_AppendV2 ensures that a snapshot removes chunk samples
// that are outside the set block time range.
// See https://github.com/prometheus/prometheus/issues/5105
func TestDB_Snapshot_ChunksOutsideOfCompactedRange_AppendV2(t *testing.T) {
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
mint := int64(1414141414000)
for i := range 1000 {
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, mint+int64(i), 1.0, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
snap := t.TempDir()
	// Hackily introduce a "race" by setting a lower max time than the maxTime of the last chunk.
db.head.maxTime.Sub(10)
require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close())
// reopen DB from snapshot
db = newTestDB(t, withDir(snap))
querier, err := db.Querier(mint, mint+1000)
require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }()
// Sum values.
seriesSet := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
var series chunkenc.Iterator
sum := 0.0
for seriesSet.Next() {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
_, v := series.At()
sum += v
}
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Empty(t, seriesSet.Warnings())
	// Since we snapshotted with MaxTime - 10, expect 10 fewer samples.
require.Equal(t, 1000.0-10, sum)
}
func TestDB_SnapshotWithDelete_AppendV2(t *testing.T) {
const numSamples int64 = 10
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
smpls := make([]float64, numSamples)
for i := range numSamples {
smpls[i] = rand.Float64()
app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
}
require.NoError(t, app.Commit())
cases := []struct {
intervals tombstones.Intervals
remaint []int64
}{
{
intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
remaint: []int64{0, 8, 9},
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
for _, r := range c.intervals {
require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
}
// create snapshot
snap := t.TempDir()
require.NoError(t, db.Snapshot(snap, true))
// reopen DB from snapshot
db := newTestDB(t, withDir(snap))
// Compare the result.
q, err := db.Querier(0, numSamples)
require.NoError(t, err)
defer func() { require.NoError(t, q.Close()) }()
res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
})
if len(expSamples) == 0 {
require.False(t, res.Next())
return
}
for {
eok, rok := expss.Next(), res.Next()
require.Equal(t, eok, rok)
if !eok {
require.Empty(t, res.Warnings())
break
}
sexp := expss.At()
sres := res.At()
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
})
}
}
func TestDB_e2e_AppendV2(t *testing.T) {
const (
numDatapoints = 1000
numRanges = 1000
timeInterval = int64(3)
)
// Create 8 series with 1000 data-points of different ranges and run queries.
lbls := [][]labels.Label{
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "b"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prometheus"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "127.0.0.1:9090"},
{Name: "job", Value: "prom-k8s"},
},
{
{Name: "a", Value: "c"},
{Name: "instance", Value: "localhost:9090"},
{Name: "job", Value: "prom-k8s"},
},
}
seriesMap := map[string][]chunks.Sample{}
for _, l := range lbls {
seriesMap[labels.New(l...).String()] = []chunks.Sample{}
}
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
for _, l := range lbls {
lset := labels.New(l...)
series := []chunks.Sample{}
ts := rand.Int63n(300)
for range numDatapoints {
v := rand.Float64()
series = append(series, sample{ts, v, nil, nil})
_, err := app.Append(0, lset, 0, ts, v, nil, nil, storage.AOptions{})
require.NoError(t, err)
ts += rand.Int63n(timeInterval) + 1
}
seriesMap[lset.String()] = series
}
require.NoError(t, app.Commit())
// Query each selector on 1000 random time-ranges.
queries := []struct {
ms []*labels.Matcher
}{
{
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
},
{
ms: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
},
},
{
ms: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
},
},
// TODO: Add Regexp Matchers.
}
for _, qry := range queries {
matched := labels.Slice{}
for _, l := range lbls {
s := labels.Selector(qry.ms)
ls := labels.New(l...)
if s.Matches(ls) {
matched = append(matched, ls)
}
}
sort.Sort(matched)
for range numRanges {
mint := rand.Int63n(300)
maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
expected := map[string][]chunks.Sample{}
// Build the mockSeriesSet.
for _, m := range matched {
smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
if len(smpls) > 0 {
expected[m.String()] = smpls
}
}
q, err := db.Querier(mint, maxt)
require.NoError(t, err)
ss := q.Select(ctx, false, nil, qry.ms...)
result := map[string][]chunks.Sample{}
for ss.Next() {
x := ss.At()
smpls, err := storage.ExpandSamples(x.Iterator(nil), newSample)
require.NoError(t, err)
if len(smpls) > 0 {
result[x.Labels().String()] = smpls
}
}
require.NoError(t, ss.Err())
require.Empty(t, ss.Warnings())
require.Equal(t, expected, result)
q.Close()
}
}
}
func TestWALFlushedOnDBClose_AppendV2(t *testing.T) {
db := newTestDB(t)
lbls := labels.FromStrings("labelname", "labelvalue")
ctx := context.Background()
app := db.AppenderV2(ctx)
_, err := app.Append(0, lbls, 0, 0, 1, nil, nil, storage.AOptions{})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.NoError(t, db.Close())
db = newTestDB(t, withDir(db.Dir()))
q, err := db.Querier(0, 1)
require.NoError(t, err)
values, ws, err := q.LabelValues(ctx, "labelname", nil)
require.NoError(t, err)
require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
}
func TestWALSegmentSizeOptions_AppendV2(t *testing.T) {
tests := map[int]func(dbdir string, segmentSize int){
		// Default WAL size.
0: func(dbDir string, _ int) {
filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err)
files := []os.FileInfo{}
for _, f := range filesAndDir {
if !f.IsDir() {
fi, err := f.Info()
require.NoError(t, err)
files = append(files, fi)
}
}
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
}
lastFile := files[len(files)-1]
require.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
		// Custom WAL size.
2 * 32 * 1024: func(dbDir string, segmentSize int) {
filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err)
files := []os.FileInfo{}
for _, f := range filesAndDir {
if !f.IsDir() {
fi, err := f.Info()
require.NoError(t, err)
files = append(files, fi)
}
}
require.NotEmpty(t, files, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
}
lastFile := files[len(files)-1]
require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
		// WAL disabled.
-1: func(dbDir string, _ int) {
// Check that WAL dir is not there.
_, err := os.Stat(filepath.Join(dbDir, "wal"))
require.Error(t, err)
			// Check that the chunks dir exists.
_, err = os.Stat(mmappedChunksDir(dbDir))
require.NoError(t, err)
},
}
for segmentSize, testFunc := range tests {
t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
opts := DefaultOptions()
opts.WALSegmentSize = segmentSize
db := newTestDB(t, withOpts(opts))
for i := range int64(155) {
app := db.AppenderV2(context.Background())
ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), 0, i, rand.Float64(), nil, nil, storage.AOptions{})
require.NoError(t, err)
for j := int64(1); j <= 78; j++ {
_, err := app.Append(ref, labels.EmptyLabels(), 0, i+j, rand.Float64(), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
require.NoError(t, db.Close())
testFunc(db.Dir(), opts.WALSegmentSize)
})
}
}
// https://github.com/prometheus/prometheus/issues/9846
// https://github.com/prometheus/prometheus/issues/9859
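// WAL replay must tolerate sample records that appear before their series
// record (see the issues above); this test reproduces that ordering.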
func TestWALReplayRaceOnSamplesLoggedBeforeSeries_AppendV2(t *testing.T) {
const (
numRuns = 1
numSamplesBeforeSeriesCreation = 1000
)
// We test both with few and many samples appended after series creation. If samples are < 120 then there's no
// mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL.
for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
for run := 1; run <= numRuns; run++ {
t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) {
testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation)
})
}
}
}
func testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
const numSeries = 1000
db := newTestDB(t)
db.DisableCompactions()
for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
// Log samples before the series is logged to the WAL.
var enc record.Encoder
var samples []record.RefSample
for ts := range numSamplesBeforeSeriesCreation {
samples = append(samples, record.RefSample{
Ref: chunks.HeadSeriesRef(uint64(seriesRef)),
T: int64(ts),
V: float64(ts),
})
}
err := db.Head().wal.Log(enc.Samples(samples, nil))
require.NoError(t, err)
// Add samples via appender so that they're logged after the series in the WAL.
app := db.AppenderV2(context.Background())
lbls := labels.FromStrings("series_id", strconv.Itoa(seriesRef))
for ts := numSamplesBeforeSeriesCreation; ts < numSamplesBeforeSeriesCreation+numSamplesAfterSeriesCreation; ts++ {
_, err := app.Append(0, lbls, 0, int64(ts), float64(ts), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
require.NoError(t, db.Close())
// Reopen the DB, replaying the WAL.
db = newTestDB(t, withDir(db.Dir()))
// Query back chunks for all series.
q, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
set := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+"))
actualSeries := 0
var chunksIt chunks.Iterator
for set.Next() {
actualSeries++
actualChunks := 0
chunksIt = set.At().Iterator(chunksIt)
for chunksIt.Next() {
actualChunks++
}
require.NoError(t, chunksIt.Err())
// We expect 1 chunk every 120 samples after series creation.
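// For example: numSamplesAfterSeriesCreation=1000 gives 1000/120 = 8 full
// mmap-ed chunks plus the open head chunk = 9; with 1 sample there is only
// the head chunk.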
require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String())
}
require.NoError(t, set.Err())
require.Equal(t, numSeries, actualSeries)
}
func TestTombstoneClean_AppendV2(t *testing.T) {
t.Parallel()
const numSamples int64 = 10
db := newTestDB(t)
ctx := context.Background()
app := db.AppenderV2(ctx)
smpls := make([]float64, numSamples)
for i := range numSamples {
smpls[i] = rand.Float64()
app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
}
require.NoError(t, app.Commit())
cases := []struct {
intervals tombstones.Intervals
remaint []int64
}{
{
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_other.go | tsdb/head_other.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !dedupelabels
package tsdb
import (
"log/slog"
"github.com/prometheus/prometheus/model/labels"
)
// Helper method to access labels; trivial when not using dedupelabels.
func (s *memSeries) labels() labels.Labels {
return s.lset
}
// RebuildSymbolTable is a no-op when not using dedupelabels.
func (*Head) RebuildSymbolTable(*slog.Logger) *labels.SymbolTable {
return nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/testutil.go | tsdb/testutil.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"testing"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
const (
float = "float"
intHistogram = "integer histogram"
floatHistogram = "float histogram"
customBucketsIntHistogram = "custom buckets int histogram"
customBucketsFloatHistogram = "custom buckets float histogram"
gaugeIntHistogram = "gauge int histogram"
gaugeFloatHistogram = "gauge float histogram"
)
type testValue struct {
Ts int64
V int64
CounterResetHeader histogram.CounterResetHint
}
type sampleTypeScenario struct {
sampleType string
appendFunc func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)
sampleFunc func(ts, value int64) sample
}
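// sampleTypeScenarios drives table-driven tests across sample types. Usage
// sketch (not part of the upstream helpers; "app" is assumed to be a
// storage.LimitedAppenderV1 obtained from a test DB):
//
//	scenario := sampleTypeScenarios[intHistogram]
//	_, expected, err := scenario.appendFunc(app, labels.FromStrings("foo", "bar"), 100, 7)
//	require.NoError(t, err)
//	require.Equal(t, expected, scenario.sampleFunc(100, 7))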
var sampleTypeScenarios = map[string]sampleTypeScenario{
float: {
sampleType: sampleMetricTypeFloat,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, f: float64(value)}
ref, err := appender.Append(0, lbls, ts, s.f)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, f: float64(value)}
},
},
intHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)}
},
},
floatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)}
},
},
customBucketsIntHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
},
},
customBucketsFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
},
},
gaugeIntHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)}
},
},
gaugeFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)}
},
},
}
// requireEqualSeries checks that the actual series are equal to the expected ones. It ignores the counter reset hints for histograms when ignoreCounterResets is set.
func requireEqualSeries(t *testing.T, expected, actual map[string][]chunks.Sample, ignoreCounterResets bool) {
for name, expectedItem := range expected {
actualItem, ok := actual[name]
require.True(t, ok, "Expected series %s not found", name)
if ignoreCounterResets {
requireEqualSamples(t, name, expectedItem, actualItem, requireEqualSamplesIgnoreCounterResets)
} else {
requireEqualSamples(t, name, expectedItem, actualItem)
}
}
for name := range actual {
_, ok := expected[name]
require.True(t, ok, "Unexpected series %s", name)
}
}
func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) {
require.Equal(t, float64(expectedSamples),
prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat))+
prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram)),
"number of ooo appended samples mismatch")
}
type requireEqualSamplesOption int
const (
requireEqualSamplesNoOption requireEqualSamplesOption = iota
requireEqualSamplesIgnoreCounterResets
requireEqualSamplesInUseBucketCompare
)
func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sample, options ...requireEqualSamplesOption) {
var (
ignoreCounterResets bool
inUseBucketCompare bool
)
for _, option := range options {
switch option {
case requireEqualSamplesIgnoreCounterResets:
ignoreCounterResets = true
case requireEqualSamplesInUseBucketCompare:
inUseBucketCompare = true
}
}
require.Len(t, actual, len(expected), "Length not equal to expected for %s", name)
for i, s := range expected {
expectedSample := s
actualSample := actual[i]
require.Equal(t, expectedSample.T(), actualSample.T(), "Different timestamps for %s[%d]", name, i)
require.Equal(t, expectedSample.Type().String(), actualSample.Type().String(), "Different types for %s[%d] at ts %d", name, i, expectedSample.T())
switch {
case s.H() != nil:
{
expectedHist := expectedSample.H()
actualHist := actualSample.H()
if ignoreCounterResets && expectedHist.CounterResetHint != histogram.GaugeType {
expectedHist.CounterResetHint = histogram.UnknownCounterReset
actualHist.CounterResetHint = histogram.UnknownCounterReset
} else {
require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint))
}
if inUseBucketCompare {
expectedSample.H().Compact(0)
actualSample.H().Compact(0)
}
require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
}
case s.FH() != nil:
{
expectedHist := expectedSample.FH()
actualHist := actualSample.FH()
if ignoreCounterResets {
expectedHist.CounterResetHint = histogram.UnknownCounterReset
actualHist.CounterResetHint = histogram.UnknownCounterReset
} else {
require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint))
}
if inUseBucketCompare {
expectedSample.FH().Compact(0)
actualSample.FH().Compact(0)
}
require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
}
default:
expectedFloat := expectedSample.F()
actualFloat := actualSample.F()
require.Equal(t, expectedFloat, actualFloat, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
}
}
}
func counterResetAsString(h histogram.CounterResetHint) string {
switch h {
case histogram.UnknownCounterReset:
return "UnknownCounterReset"
case histogram.CounterReset:
return "CounterReset"
case histogram.NotCounterReset:
return "NotCounterReset"
case histogram.GaugeType:
return "GaugeType"
}
panic("Unexpected counter reset type")
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/exemplar.go | tsdb/exemplar.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"slices"
"sync"
"unicode/utf8"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
)
const (
// Indicates that there is no index entry for an exemplar.
noExemplar = -1
// Estimated number of exemplars per series, for sizing the index.
estimatedExemplarsPerSeries = 16
)
type CircularExemplarStorage struct {
lock sync.RWMutex
exemplars []circularBufferEntry
nextIndex int
metrics *ExemplarMetrics
// Map of series labels as a string to index entry, which points to the first
// and last exemplar for the series in the exemplars circular buffer.
index map[string]*indexEntry
}
type indexEntry struct {
oldest int
newest int
seriesLabels labels.Labels
}
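// circularBufferEntry is one slot of the circular buffer. Entries for the same
// series form a linked list threaded through the buffer: index[series].oldest
// points at the series' first stored exemplar, each entry's next field points
// at the series' following exemplar (noExemplar marks the tail), and
// index[series].newest points at the last one.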
type circularBufferEntry struct {
exemplar exemplar.Exemplar
next int
ref *indexEntry
}
type ExemplarMetrics struct {
exemplarsAppended prometheus.Counter
exemplarsInStorage prometheus.Gauge
seriesWithExemplarsInStorage prometheus.Gauge
lastExemplarsTs prometheus.Gauge
maxExemplars prometheus.Gauge
outOfOrderExemplars prometheus.Counter
}
func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics {
m := ExemplarMetrics{
exemplarsAppended: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_exemplar_exemplars_appended_total",
Help: "Total number of appended exemplars.",
}),
exemplarsInStorage: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_exemplar_exemplars_in_storage",
Help: "Number of exemplars currently in circular storage.",
}),
seriesWithExemplarsInStorage: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_exemplar_series_with_exemplars_in_storage",
Help: "Number of series with exemplars currently in circular storage.",
}),
lastExemplarsTs: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds",
Help: "The timestamp of the oldest exemplar stored in circular storage. Useful to check for what time" +
"range the current exemplar buffer limit allows. This usually means the last timestamp" +
"for all exemplars for a typical setup. This is not true though if one of the series timestamp is in future compared to rest series.",
}),
outOfOrderExemplars: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_exemplar_out_of_order_exemplars_total",
Help: "Total number of out of order exemplar ingestion failed attempts.",
}),
maxExemplars: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_exemplar_max_exemplars",
Help: "Total number of exemplars the exemplar storage can store, resizeable.",
}),
}
if reg != nil {
reg.MustRegister(
m.exemplarsAppended,
m.exemplarsInStorage,
m.seriesWithExemplarsInStorage,
m.lastExemplarsTs,
m.outOfOrderExemplars,
m.maxExemplars,
)
}
return &m
}
// NewCircularExemplarStorage creates a circular in memory exemplar storage.
// If we assume an average of 95 bytes per exemplar, we can fit 5651272 exemplars in
// 1GB of extra memory, accounting for the fact that this is heap-allocated space.
// If length <= 0, then the exemplar storage is essentially a no-op storage but can later be
// resized to store exemplars.
func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) {
if length < 0 {
length = 0
}
c := &CircularExemplarStorage{
exemplars: make([]circularBufferEntry, length),
index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
metrics: m,
}
c.metrics.maxExemplars.Set(float64(length))
return c, nil
}
func (ce *CircularExemplarStorage) ApplyConfig(cfg *config.Config) error {
ce.Resize(cfg.StorageConfig.ExemplarsConfig.MaxExemplars)
return nil
}
func (ce *CircularExemplarStorage) Appender() *CircularExemplarStorage {
return ce
}
func (ce *CircularExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
return ce, nil
}
func (ce *CircularExemplarStorage) Querier(context.Context) (storage.ExemplarQuerier, error) {
return ce, nil
}
// Select returns exemplars for a given set of label matchers.
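// Matchers within one inner slice are AND-ed; separate slices are OR-ed (see
// matchesSomeMatcherSet below). Usage sketch, assuming a populated storage es
// (errors elided):
//
//	m, _ := labels.NewMatcher(labels.MatchEqual, "job", "api")
//	results, _ := es.Select(minT, maxT, []*labels.Matcher{m})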
func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
ret := make([]exemplar.QueryResult, 0)
ce.lock.RLock()
defer ce.lock.RUnlock()
if len(ce.exemplars) == 0 {
return ret, nil
}
// Loop through each index entry, which will point us to first/last exemplar for each series.
for _, idx := range ce.index {
var se exemplar.QueryResult
e := ce.exemplars[idx.oldest]
if e.exemplar.Ts > end || ce.exemplars[idx.newest].exemplar.Ts < start {
continue
}
if !matchesSomeMatcherSet(idx.seriesLabels, matchers) {
continue
}
se.SeriesLabels = idx.seriesLabels
// Loop through all exemplars in the circular buffer for the current series.
for e.exemplar.Ts <= end {
if e.exemplar.Ts >= start {
se.Exemplars = append(se.Exemplars, e.exemplar)
}
if e.next == noExemplar {
break
}
e = ce.exemplars[e.next]
}
if len(se.Exemplars) > 0 {
ret = append(ret, se)
}
}
slices.SortFunc(ret, func(a, b exemplar.QueryResult) int {
return labels.Compare(a.SeriesLabels, b.SeriesLabels)
})
return ret, nil
}
func matchesSomeMatcherSet(lbls labels.Labels, matchers [][]*labels.Matcher) bool {
Outer:
for _, ms := range matchers {
for _, m := range ms {
if !m.Matches(lbls.Get(m.Name)) {
continue Outer
}
}
return true
}
return false
}
func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error {
var buf [1024]byte
seriesLabels := l.Bytes(buf[:])
// TODO(bwplotka): This lock can lock all scrapers, there might be high contention on this at scale.
// Optimize by moving the lock to be per series (& benchmark it).
ce.lock.RLock()
defer ce.lock.RUnlock()
return ce.validateExemplar(ce.index[string(seriesLabels)], e, false)
}
// Not thread safe. The appended parameter tells us whether this is an external validation, or internal
// as a result of an AddExemplar call, in which case we should update any relevant metrics.
func (ce *CircularExemplarStorage) validateExemplar(idx *indexEntry, e exemplar.Exemplar, appended bool) error {
if len(ce.exemplars) == 0 {
return storage.ErrExemplarsDisabled
}
// Exemplar label length does not include chars involved in text rendering such as quotes,
// equals signs, or commas. See the definition of const ExemplarMaxLabelSetLength.
labelSetLen := 0
if err := e.Labels.Validate(func(l labels.Label) error {
labelSetLen += utf8.RuneCountInString(l.Name)
labelSetLen += utf8.RuneCountInString(l.Value)
if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
return storage.ErrExemplarLabelLength
}
return nil
}); err != nil {
return err
}
if idx == nil {
return nil
}
// Check for duplicate vs last stored exemplar for this series.
// NB these are expected, and appending them is a no-op.
// For floats and classic histograms, there is only 1 exemplar per series,
// so this is sufficient. For native histograms with multiple exemplars per series,
// we have another check below.
newestExemplar := ce.exemplars[idx.newest].exemplar
if newestExemplar.Equals(e) {
return storage.ErrDuplicateExemplar
}
// Since during the scrape the exemplars are sorted first by timestamp, then value, then labels,
// if any of these conditions are true, we know that the exemplar is either a duplicate
// of a previous one (but not the most recent one as that is checked above) or out of order.
// We now allow exemplars with duplicate timestamps as long as they have different values and/or labels
// since that can happen for different buckets of a native histogram.
// We do not distinguish between duplicates and out of order as iterating through the exemplars
// to check for that would be expensive (versus just comparing with the most recent one) especially
// since this is run under a lock, and not worth it as we just need to return an error so we do not
// append the exemplar.
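// For example: if the newest stored exemplar is {Ts: 100, Value: 2}, an incoming
// {Ts: 100, Value: 1} is rejected as out of order, while {Ts: 100, Value: 3} or
// {Ts: 101, Value: 1} passes this check.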
if e.Ts < newestExemplar.Ts ||
(e.Ts == newestExemplar.Ts && e.Value < newestExemplar.Value) ||
(e.Ts == newestExemplar.Ts && e.Value == newestExemplar.Value && e.Labels.Hash() < newestExemplar.Labels.Hash()) {
if appended {
ce.metrics.outOfOrderExemplars.Inc()
}
return storage.ErrOutOfOrderExemplar
}
return nil
}
// Resize changes the size of the exemplar buffer by allocating a new buffer and migrating data to it.
// Exemplars are kept when possible. Shrinking will discard oldest data (in order of ingest) as needed.
func (ce *CircularExemplarStorage) Resize(l int64) int {
// Accept negative values as just 0 size.
if l <= 0 {
l = 0
}
ce.lock.Lock()
defer ce.lock.Unlock()
if l == int64(len(ce.exemplars)) {
return 0
}
oldBuffer := ce.exemplars
oldNextIndex := int64(ce.nextIndex)
ce.exemplars = make([]circularBufferEntry, l)
ce.index = make(map[string]*indexEntry, l/estimatedExemplarsPerSeries)
ce.nextIndex = 0
// Replay as many entries as needed, starting with oldest first.
count := min(l, int64(len(oldBuffer)))
migrated := 0
if l > 0 && len(oldBuffer) > 0 {
// Rewind previous next index by count with wrap-around.
// This math is essentially looking at nextIndex, where we would write the next exemplar to,
// and find the index in the old exemplar buffer that we should start migrating exemplars from.
// This way we don't migrate exemplars that would just be overwritten when migrating later exemplars.
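// For example: with len(oldBuffer)=10, oldNextIndex=3 and a new size of 4,
// count=4 and startIndex=(3-4+10)%10=9, so entries 9,0,1,2 (the four most
// recently written, oldest first) are migrated.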
startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
var buf [1024]byte
for i := range count {
idx := (startIndex + i) % int64(len(oldBuffer))
if oldBuffer[idx].ref != nil {
ce.migrate(&oldBuffer[idx], buf[:])
migrated++
}
}
}
ce.computeMetrics()
ce.metrics.maxExemplars.Set(float64(l))
return migrated
}
// migrate is like AddExemplar but reuses existing structs. It is expected to be called in batch;
// it requires an external lock and does not compute metrics.
func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byte) {
seriesLabels := entry.ref.seriesLabels.Bytes(buf[:0])
idx, ok := ce.index[string(seriesLabels)]
if !ok {
idx = entry.ref
idx.oldest = ce.nextIndex
ce.index[string(seriesLabels)] = idx
} else {
entry.ref = idx
ce.exemplars[idx.newest].next = ce.nextIndex
}
idx.newest = ce.nextIndex
entry.next = noExemplar
ce.exemplars[ce.nextIndex] = *entry
ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
}
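// AddExemplar appends e for the series l, overwriting the oldest slot in the
// circular buffer when it is full. Usage sketch, assuming a storage es created
// via NewCircularExemplarStorage (errors elided):
//
//	_ = es.AddExemplar(
//		labels.FromStrings("__name__", "http_requests_total", "job", "api"),
//		exemplar.Exemplar{Labels: labels.FromStrings("trace_id", "abc123"), Value: 1, Ts: 1000, HasTs: true},
//	)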
func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
// TODO(bwplotka): This lock can lock all scrapers, there might be high contention on this at scale.
// Optimize by moving the lock to be per series (& benchmark it).
ce.lock.Lock()
defer ce.lock.Unlock()
if len(ce.exemplars) == 0 {
return storage.ErrExemplarsDisabled
}
var buf [1024]byte
seriesLabels := l.Bytes(buf[:])
idx, ok := ce.index[string(seriesLabels)]
err := ce.validateExemplar(idx, e, true)
if err != nil {
if errors.Is(err, storage.ErrDuplicateExemplar) {
// Duplicate exemplar, noop.
return nil
}
return err
}
if !ok {
idx = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
ce.index[string(seriesLabels)] = idx
} else {
ce.exemplars[idx.newest].next = ce.nextIndex
}
if prev := &ce.exemplars[ce.nextIndex]; prev.ref != nil {
// An exemplar already occupies this ce.nextIndex entry;
// drop it to make room for the new one.
if prev.next == noExemplar {
// Last item for this series, remove index entry.
var buf [1024]byte
prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
delete(ce.index, string(prevLabels))
} else {
prev.ref.oldest = prev.next
}
}
// Set the next value to noExemplar (-1), which Select uses to detect that it has iterated
// through all exemplars for a series; this entry is now the newest exemplar for its series.
ce.exemplars[ce.nextIndex].next = noExemplar
ce.exemplars[ce.nextIndex].exemplar = e
ce.exemplars[ce.nextIndex].ref = idx
idx.newest = ce.nextIndex
ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
ce.metrics.exemplarsAppended.Inc()
ce.computeMetrics()
return nil
}
func (ce *CircularExemplarStorage) computeMetrics() {
ce.metrics.seriesWithExemplarsInStorage.Set(float64(len(ce.index)))
if len(ce.exemplars) == 0 {
ce.metrics.exemplarsInStorage.Set(float64(0))
ce.metrics.lastExemplarsTs.Set(float64(0))
return
}
if ce.exemplars[ce.nextIndex].ref != nil {
ce.metrics.exemplarsInStorage.Set(float64(len(ce.exemplars)))
ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[ce.nextIndex].exemplar.Ts) / 1000)
return
}
// We did not yet fill the buffer.
ce.metrics.exemplarsInStorage.Set(float64(ce.nextIndex))
if ce.exemplars[0].ref != nil {
ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts) / 1000)
}
}
// IterateExemplars iterates through all the exemplars from oldest to newest appended and calls
// the given function on each of them until the end, or until the first call that returns an error.
func (ce *CircularExemplarStorage) IterateExemplars(f func(seriesLabels labels.Labels, e exemplar.Exemplar) error) error {
ce.lock.RLock()
defer ce.lock.RUnlock()
idx := ce.nextIndex
l := len(ce.exemplars)
for i := 0; i < l; i, idx = i+1, (idx+1)%l {
if ce.exemplars[idx].ref == nil {
continue
}
err := f(ce.exemplars[idx].ref.seriesLabels, ce.exemplars[idx].exemplar)
if err != nil {
return err
}
}
return nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/blockwriter.go | tsdb/blockwriter.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"os"
"github.com/oklog/ulid/v2"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
// BlockWriter is a block writer that allows appending and flushing series to disk.
type BlockWriter struct {
logger *slog.Logger
destinationDir string
head *Head
blockSize int64 // in ms
chunkDir string
}
// ErrNoSeriesAppended is returned if the series count is zero while flushing blocks.
var ErrNoSeriesAppended = errors.New("no series appended, aborting")
// NewBlockWriter creates a new block writer.
//
// The returned writer accumulates all the series in the Head block until `Flush` is called.
//
// Note that the writer will not check if the target directory exists or
// contains anything at all. It is the caller's responsibility to
// ensure that the resulting blocks do not overlap etc.
// Writer ensures the block flush is atomic (via rename).
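// Usage sketch (logger, dir, ctx and lbls are assumed to exist; errors elided):
//
//	w, _ := NewBlockWriter(logger, dir, DefaultBlockDuration)
//	app := w.Appender(ctx)
//	_, _ = app.Append(0, lbls, 1000, 42.0)
//	_ = app.Commit()
//	id, _ := w.Flush(ctx) // Cuts one block and returns its ULID.
//	_ = w.Close()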
func NewBlockWriter(logger *slog.Logger, dir string, blockSize int64) (*BlockWriter, error) {
w := &BlockWriter{
logger: logger,
destinationDir: dir,
blockSize: blockSize,
}
if err := w.initHead(); err != nil {
return nil, err
}
return w, nil
}
// initHead creates and initialises a new TSDB head.
func (w *BlockWriter) initHead() error {
chunkDir, err := os.MkdirTemp(os.TempDir(), "head")
if err != nil {
return fmt.Errorf("create temp dir: %w", err)
}
w.chunkDir = chunkDir
opts := DefaultHeadOptions()
opts.ChunkRange = w.blockSize
opts.ChunkDirRoot = w.chunkDir
h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats())
if err != nil {
return fmt.Errorf("tsdb.NewHead: %w", err)
}
w.head = h
return w.head.Init(math.MinInt64)
}
// Appender returns a new appender on the database.
// Appender can't be called concurrently. However, the returned Appender can safely be used concurrently.
func (w *BlockWriter) Appender(ctx context.Context) storage.Appender {
return w.head.Appender(ctx)
}
// AppenderV2 returns a new appender on the database.
// AppenderV2 can't be called concurrently. However, the returned AppenderV2 can safely be used concurrently.
func (w *BlockWriter) AppenderV2(ctx context.Context) storage.AppenderV2 {
return w.head.AppenderV2(ctx)
}
// Flush implements the Writer interface. This is where actual block writing
// happens. After flush completes, no writes can be done.
func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
mint := w.head.MinTime()
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this, a block's interval is always 1ms longer than the time range of the samples it includes.
maxt := w.head.MaxTime() + 1
w.logger.Info("flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt))
compactor, err := NewLeveledCompactor(ctx,
nil,
w.logger,
[]int64{w.blockSize},
chunkenc.NewPool(), nil)
if err != nil {
return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err)
}
ids, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
if err != nil {
return ulid.ULID{}, fmt.Errorf("compactor write: %w", err)
}
// No block was produced. The caller is responsible for checking for an empty
// ulid.ULID, depending on its use case.
if len(ids) == 0 {
return ulid.ULID{}, nil
}
return ids[0], nil
}
func (w *BlockWriter) Close() error {
defer func() {
if err := os.RemoveAll(w.chunkDir); err != nil {
w.logger.Error("error in deleting BlockWriter files", "err", err)
}
}()
return w.head.Close()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/db.go | tsdb/db.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tsdb implements a time series storage for float64 sample data.
package tsdb
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"log/slog"
"math"
"math/rand"
"os"
"path/filepath"
"slices"
"strings"
"sync"
"time"
"github.com/oklog/ulid/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
_ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met.
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/features"
)
const (
// DefaultBlockDuration in milliseconds.
DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
// DefaultCompactionDelayMaxPercent in percentage.
DefaultCompactionDelayMaxPercent = 10
// Block dir suffixes to make deletion and creation operations atomic.
// We decided to use suffixes instead of creating meta.json last (or deleting it first),
// because in the error case you can still recover meta.json from the block content within the local TSDB dir.
// TODO(bwplotka): TSDB can end up with various .tmp files (e.g. meta.json.tmp, WAL or segment tmp files). Think
// about removing those too on start to save space. Currently only block tmp dirs are removed.
tmpForDeletionBlockDirSuffix = ".tmp-for-deletion"
tmpForCreationBlockDirSuffix = ".tmp-for-creation"
// Pre-2.21 tmp dir suffix, used in clean-up functions.
tmpLegacy = ".tmp"
)
// ErrNotReady is returned if the underlying storage is not ready yet.
var ErrNotReady = errors.New("TSDB not ready")
// DefaultOptions returns the default options used for the DB. They are reasonable
// for setups using millisecond-precision timestamps.
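// Callers typically override selected fields before calling Open. A minimal
// sketch (values are illustrative only):
//
//	opts := DefaultOptions()
//	opts.WALSegmentSize = 64 * 1024 * 1024 // 64 MiB WAL segments.
//	opts.RetentionDuration = int64(30 * 24 * time.Hour / time.Millisecond)
//	db, err := Open(dir, logger, reg, opts, NewDBStats())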
func DefaultOptions() *Options {
return &Options{
WALSegmentSize: wlog.DefaultSegmentSize,
MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize,
RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
MinBlockDuration: DefaultBlockDuration,
MaxBlockDuration: DefaultBlockDuration,
NoLockfile: false,
SamplesPerChunk: DefaultSamplesPerChunk,
WALCompression: compression.None,
StripeSize: DefaultStripeSize,
HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize,
IsolationDisabled: defaultIsolationDisabled,
HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize,
OutOfOrderCapMax: DefaultOutOfOrderCapMax,
EnableOverlappingCompaction: true,
EnableSharding: false,
EnableDelayedCompaction: false,
CompactionDelayMaxPercent: DefaultCompactionDelayMaxPercent,
CompactionDelay: time.Duration(0),
PostingsDecoderFactory: DefaultPostingsDecoderFactory,
BlockReloadInterval: 1 * time.Minute,
}
}
// Options of the DB storage.
type Options struct {
// Segments (wal files) max size.
// WALSegmentSize = 0, segment size is default size.
// WALSegmentSize > 0, segment size is WALSegmentSize.
// WALSegmentSize < 0, wal is disabled.
WALSegmentSize int
// MaxBlockChunkSegmentSize is the max size of block chunk segment files.
// MaxBlockChunkSegmentSize = 0, chunk segment size is default size.
// MaxBlockChunkSegmentSize > 0, chunk segment size is MaxBlockChunkSegmentSize.
MaxBlockChunkSegmentSize int64
// Duration of persisted data to keep.
// Unit agnostic as long as unit is consistent with MinBlockDuration and MaxBlockDuration.
// Typically it is in milliseconds.
RetentionDuration int64
// Maximum number of bytes in blocks to be retained.
// 0 or less means disabled.
// NOTE: Proper storage size calculations also need to consider
// the size of the WAL folder, which is not included when calculating
// the current size of the database.
MaxBytes int64
// NoLockfile disables creation and consideration of a lock file.
NoLockfile bool
// WALCompression configures the compression type to use on records in the WAL.
WALCompression compression.Type
// Maximum number of CPUs that can simultaneously process WAL replay.
// If it is <=0, then GOMAXPROCS is used.
WALReplayConcurrency int
// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
StripeSize int
// The timestamp range of head blocks after which they get persisted.
// It's the minimum duration of any persisted block.
// Unit agnostic as long as unit is consistent with RetentionDuration and MaxBlockDuration.
// Typically it is in milliseconds.
MinBlockDuration int64
// The maximum timestamp range of compacted blocks.
// Unit agnostic as long as unit is consistent with MinBlockDuration and RetentionDuration.
// Typically it is in milliseconds.
MaxBlockDuration int64
// HeadChunksWriteBufferSize configures the write buffer size used by the head chunks mapper.
HeadChunksWriteBufferSize int
// HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper.
HeadChunksWriteQueueSize int
// SamplesPerChunk configures the target number of samples per chunk.
SamplesPerChunk int
// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
SeriesLifecycleCallback SeriesLifecycleCallback
// BlocksToDelete is a function which returns the blocks which can be deleted.
// It is always the default time and size based retention in Prometheus and
// mainly meant for external users who import TSDB.
BlocksToDelete BlocksToDeleteFunc
// Enables the in memory exemplar storage.
EnableExemplarStorage bool
// Enables the snapshot of in-memory chunks on shutdown. This makes restarts faster.
EnableMemorySnapshotOnShutdown bool
// MaxExemplars sets the size, in # of exemplars stored, of the single circular buffer used to store exemplars in memory.
// See tsdb/exemplar.go, specifically the CircularExemplarStorage struct and its constructor NewCircularExemplarStorage.
MaxExemplars int64
// Disables isolation between reads and in-flight appends.
IsolationDisabled bool
// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
// This can change during run-time, so this value from here should only be used
// while initialising.
OutOfOrderTimeWindow int64
// OutOfOrderCapMax is maximum capacity for OOO chunks (in samples).
// If it is <=0, the default value is assumed.
OutOfOrderCapMax int64
// Compaction of overlapping blocks is allowed if EnableOverlappingCompaction is true.
// This is an optional flag for overlapping blocks.
// The reason why this flag exists is because there are various users of the TSDB
// that do not want vertical compaction happening on ingest time. Instead,
// they'd rather keep overlapping blocks and let another component do the overlapping compaction later.
EnableOverlappingCompaction bool
// EnableSharding enables query sharding support in TSDB.
EnableSharding bool
// EnableDelayedCompaction, when set to true, assigns a random value to CompactionDelay during DB opening.
// When set to false, delayed compaction is disabled, unless CompactionDelay is set directly.
EnableDelayedCompaction bool
// CompactionDelay delays the start time of auto compactions.
// It can be increased by up to one minute if the DB does not commit too often.
CompactionDelay time.Duration
// CompactionDelayMaxPercent is the upper limit for CompactionDelay, specified as a percentage of the head chunk range.
CompactionDelayMaxPercent int
// NewCompactorFunc is a function that returns a TSDB compactor.
NewCompactorFunc NewCompactorFunc
// BlockQuerierFunc is a function to return storage.Querier from a BlockReader.
BlockQuerierFunc BlockQuerierFunc
// BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader.
BlockChunkQuerierFunc BlockChunkQuerierFunc
// PostingsDecoderFactory allows users to customize postings decoders based on BlockMeta.
// By default, DefaultPostingsDecoderFactory will be used to create raw posting decoder.
PostingsDecoderFactory PostingsDecoderFactory
// UseUncachedIO allows bypassing the page cache when appropriate.
UseUncachedIO bool
// EnableSTAsZeroSample represents the 'created-timestamp-zero-ingestion' feature flag.
// If true, the ST, if non-zero and earlier than the sample timestamp, will be stored
// as a zero sample before the actual sample.
//
// The zero sample is best-effort, only debug log on failure is emitted.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableSTAsZeroSample bool
// EnableMetadataWALRecords represents the 'metadata-wal-records' feature flag.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableMetadataWALRecords bool
// BlockCompactionExcludeFunc is a function which returns true for blocks that should NOT be compacted.
// It's passed down to the TSDB compactor.
BlockCompactionExcludeFunc BlockExcludeFilterFunc
// BlockReloadInterval is the interval at which blocks are reloaded.
BlockReloadInterval time.Duration
// FeatureRegistry is used to register TSDB features.
FeatureRegistry features.Collector
}
type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, error)
type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)
// DB handles reads and writes of time series falling into
// a hashed partition of a series database.
type DB struct {
dir string
locker *tsdbutil.DirLocker
logger *slog.Logger
metrics *dbMetrics
opts *Options
chunkPool chunkenc.Pool
compactor Compactor
blocksToDelete BlocksToDeleteFunc
// mtx must be held when modifying the general block layout or lastGarbageCollectedMmapRef.
mtx sync.RWMutex
blocks []*Block
// The last OOO chunk that was compacted and written to disk. New queriers must not read chunks less
// than or equal to this reference, as these chunks could be garbage collected at any time.
lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
head *Head
compactc chan struct{}
donec chan struct{}
stopc chan struct{}
// cmtx ensures that compactions and deletions don't run simultaneously.
cmtx sync.Mutex
// autoCompactMtx ensures that no compaction gets triggered while
// changing the autoCompact var.
autoCompactMtx sync.Mutex
autoCompact bool
// retentionMtx protects access to retention configuration values that can
// be updated at runtime through config file changes.
retentionMtx sync.RWMutex
// Cancel a running compaction when a shutdown is initiated.
compactCancel context.CancelFunc
// timeWhenCompactionDelayStarted helps delay the compactions start time.
timeWhenCompactionDelayStarted time.Time
// oooWasEnabled is true if out-of-order support was enabled at least once
// while the TSDB was up, in which case we need to keep supporting
// out-of-order compaction and vertical queries.
oooWasEnabled atomic.Bool
writeNotified wlog.WriteNotified
registerer prometheus.Registerer
blockQuerierFunc BlockQuerierFunc
blockChunkQuerierFunc BlockChunkQuerierFunc
}
type dbMetrics struct {
loadedBlocks prometheus.GaugeFunc
symbolTableSize prometheus.GaugeFunc
reloads prometheus.Counter
reloadsFailed prometheus.Counter
compactionsFailed prometheus.Counter
compactionsTriggered prometheus.Counter
compactionsSkipped prometheus.Counter
sizeRetentionCount prometheus.Counter
timeRetentionCount prometheus.Counter
startTime prometheus.GaugeFunc
tombCleanTimer prometheus.Histogram
blocksBytes prometheus.Gauge
maxBytes prometheus.Gauge
retentionDuration prometheus.Gauge
}
func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
m := &dbMetrics{}
m.loadedBlocks = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_blocks_loaded",
Help: "Number of currently loaded data blocks",
}, func() float64 {
db.mtx.RLock()
defer db.mtx.RUnlock()
return float64(len(db.blocks))
})
m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_symbol_table_size_bytes",
Help: "Size of symbol table in memory for loaded blocks",
}, func() float64 {
db.mtx.RLock()
blocks := db.blocks
db.mtx.RUnlock()
symTblSize := uint64(0)
for _, b := range blocks {
symTblSize += b.GetSymbolTableSize()
}
return float64(symTblSize)
})
m.reloads = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_reloads_total",
Help: "Number of times the database reloaded block data from disk.",
})
m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_reloads_failures_total",
Help: "Number of times the database failed to reloadBlocks block data from disk.",
})
m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_compactions_triggered_total",
Help: "Total number of triggered compactions for the partition.",
})
m.compactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_compactions_failed_total",
Help: "Total number of compactions that failed for the partition.",
})
m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_time_retentions_total",
Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.",
})
m.compactionsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_compactions_skipped_total",
Help: "Total number of skipped compactions due to disabled auto compaction.",
})
m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_lowest_timestamp",
Help: "Lowest timestamp value stored in the database. The unit is decided by the library consumer.",
}, func() float64 {
db.mtx.RLock()
defer db.mtx.RUnlock()
if len(db.blocks) == 0 {
return float64(db.head.MinTime())
}
return float64(db.blocks[0].meta.MinTime)
})
m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "prometheus_tsdb_tombstone_cleanup_seconds",
Help: "The time taken to recompact blocks to remove tombstones.",
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
})
m.blocksBytes = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_storage_blocks_bytes",
Help: "The number of bytes that are currently used for local storage by all blocks.",
})
m.maxBytes = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_retention_limit_bytes",
Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
})
m.retentionDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_retention_limit_seconds",
Help: "How long to retain samples in storage.",
})
m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_size_retentions_total",
Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
})
if r != nil {
r.MustRegister(
m.loadedBlocks,
m.symbolTableSize,
m.reloads,
m.reloadsFailed,
m.compactionsFailed,
m.compactionsTriggered,
m.compactionsSkipped,
m.sizeRetentionCount,
m.timeRetentionCount,
m.startTime,
m.tombCleanTimer,
m.blocksBytes,
m.maxBytes,
m.retentionDuration,
)
}
return m
}
// DBStats contains statistics about the DB separated by component (eg. head).
// They are available before the DB has finished initializing.
type DBStats struct {
Head *HeadStats
}
// NewDBStats returns a new DBStats object initialized using the
// new function from each component.
func NewDBStats() *DBStats {
return &DBStats{
Head: NewHeadStats(),
}
}
// ErrClosed is returned when the db is closed.
var ErrClosed = errors.New("db already closed")
// DBReadOnly provides APIs for read only operations on a database.
// The current implementation doesn't support concurrency, so
// all API calls should happen in the same goroutine.
type DBReadOnly struct {
logger *slog.Logger
dir string
sandboxDir string
closers []io.Closer
closed chan struct{}
}
// OpenDBReadOnly opens DB in the given directory for read only operations.
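// Usage sketch (errors elided):
//
//	rodb, _ := OpenDBReadOnly(dir, "", logger)
//	defer rodb.Close()
//	q, _ := rodb.Querier(math.MinInt64, math.MaxInt64)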
func OpenDBReadOnly(dir, sandboxDirRoot string, l *slog.Logger) (*DBReadOnly, error) {
if _, err := os.Stat(dir); err != nil {
return nil, fmt.Errorf("opening the db dir: %w", err)
}
if sandboxDirRoot == "" {
sandboxDirRoot = dir
}
sandboxDir, err := os.MkdirTemp(sandboxDirRoot, "tmp_dbro_sandbox")
if err != nil {
return nil, fmt.Errorf("setting up sandbox dir: %w", err)
}
if l == nil {
l = promslog.NewNopLogger()
}
return &DBReadOnly{
logger: l,
dir: dir,
sandboxDir: sandboxDir,
closed: make(chan struct{}),
}, nil
}
// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL.
// Samples that are in existing blocks will not be written to the new block.
// Note that if the read only database is running concurrently with a
// writable database then writing the WAL to the database directory can race.
func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
blockReaders, err := db.Blocks()
if err != nil {
return fmt.Errorf("read blocks: %w", err)
}
maxBlockTime := int64(math.MinInt64)
if len(blockReaders) > 0 {
maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime
}
w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal"))
if err != nil {
return err
}
var wbl *wlog.WL
wblDir := filepath.Join(db.dir, wlog.WblDirName)
if _, err := os.Stat(wblDir); !os.IsNotExist(err) {
wbl, err = wlog.Open(db.logger, wblDir)
if err != nil {
return err
}
}
opts := DefaultHeadOptions()
opts.ChunkDirRoot = db.dir
head, err := NewHead(nil, db.logger, w, wbl, opts, NewHeadStats())
if err != nil {
return err
}
defer func() {
errs := tsdb_errors.NewMulti(returnErr)
if err := head.Close(); err != nil {
errs.Add(fmt.Errorf("closing Head: %w", err))
}
returnErr = errs.Err()
}()
// Set the min valid time for the ingested wal samples
// to be no lower than the maxt of the last block.
if err := head.Init(maxBlockTime); err != nil {
return fmt.Errorf("read WAL: %w", err)
}
mint := head.MinTime()
maxt := head.MaxTime()
rh := NewRangeHead(head, mint, maxt)
compactor, err := NewLeveledCompactor(
context.Background(),
nil,
db.logger,
ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
chunkenc.NewPool(), nil,
)
if err != nil {
return fmt.Errorf("create leveled compactor: %w", err)
}
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this, a block's interval is always 1ms longer than the time range of the samples it includes.
_, err = compactor.Write(dir, rh, mint, maxt+1, nil)
if err != nil {
return fmt.Errorf("writing WAL: %w", err)
}
return nil
}
func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) {
select {
case <-db.closed:
return nil, ErrClosed
default:
}
blockReaders, err := db.Blocks()
if err != nil {
return nil, err
}
blocks := make([]*Block, len(blockReaders))
for i, b := range blockReaders {
b, ok := b.(*Block)
if !ok {
return nil, errors.New("unable to convert a read only block to a normal block")
}
blocks[i] = b
}
opts := DefaultHeadOptions()
// Hard link the chunk files to a dir in db.sandboxDir in case the Head needs to truncate some of them
// or cut new ones while replaying the WAL.
// See https://github.com/prometheus/prometheus/issues/11618.
err = chunks.HardLinkChunkFiles(mmappedChunksDir(db.dir), mmappedChunksDir(db.sandboxDir))
if err != nil {
return nil, err
}
opts.ChunkDirRoot = db.sandboxDir
head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats())
if err != nil {
return nil, err
}
maxBlockTime := int64(math.MinInt64)
if len(blocks) > 0 {
maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
}
// Also add the WAL if the current blocks don't cover the request's time range.
if maxBlockTime <= maxt {
if err := head.Close(); err != nil {
return nil, err
}
w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal"))
if err != nil {
return nil, err
}
var wbl *wlog.WL
wblDir := filepath.Join(db.dir, wlog.WblDirName)
if _, err := os.Stat(wblDir); !os.IsNotExist(err) {
wbl, err = wlog.Open(db.logger, wblDir)
if err != nil {
return nil, err
}
}
opts := DefaultHeadOptions()
opts.ChunkDirRoot = db.sandboxDir
head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats())
if err != nil {
return nil, err
}
// Set the min valid time for the ingested wal samples
// to be no lower than the maxt of the last block.
if err := head.Init(maxBlockTime); err != nil {
return nil, fmt.Errorf("read WAL: %w", err)
}
// Set the wal and the wbl to nil to disable related operations.
// This is mainly to avoid blocking when closing the head.
head.wal = nil
head.wbl = nil
}
db.closers = append(db.closers, head)
return &DB{
dir: db.dir,
logger: db.logger,
blocks: blocks,
head: head,
blockQuerierFunc: NewBlockQuerier,
blockChunkQuerierFunc: NewBlockChunkQuerier,
}, nil
}
// Querier loads the blocks and wal and returns a new querier over the data partition for the given time range.
// Current implementation doesn't support multiple Queriers.
func (db *DBReadOnly) Querier(mint, maxt int64) (storage.Querier, error) {
q, err := db.loadDataAsQueryable(maxt)
if err != nil {
return nil, err
}
return q.Querier(mint, maxt)
}
// ChunkQuerier loads blocks and the wal and returns a new chunk querier over the data partition for the given time range.
// Current implementation doesn't support multiple ChunkQueriers.
func (db *DBReadOnly) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
q, err := db.loadDataAsQueryable(maxt)
if err != nil {
return nil, err
}
return q.ChunkQuerier(mint, maxt)
}
// Blocks returns a slice of block readers for persisted blocks.
func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
select {
case <-db.closed:
return nil, ErrClosed
default:
}
loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil, DefaultPostingsDecoderFactory)
if err != nil {
return nil, err
}
// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
for _, block := range loadable {
for _, b := range block.Meta().Compaction.Parents {
delete(corrupted, b.ULID)
}
}
if len(corrupted) > 0 {
for _, b := range loadable {
if err := b.Close(); err != nil {
db.logger.Warn("Closing block failed", "err", err, "block", b)
}
}
errs := tsdb_errors.NewMulti()
for ulid, err := range corrupted {
if err != nil {
errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
}
}
return nil, errs.Err()
}
if len(loadable) == 0 {
return nil, nil
}
slices.SortFunc(loadable, func(a, b *Block) int {
switch {
case a.Meta().MinTime < b.Meta().MinTime:
return -1
case a.Meta().MinTime > b.Meta().MinTime:
return 1
default:
return 0
}
})
blockMetas := make([]BlockMeta, 0, len(loadable))
for _, b := range loadable {
blockMetas = append(blockMetas, b.Meta())
}
if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
db.logger.Warn("Overlapping blocks found during opening", "detail", overlaps.String())
}
// Close all previously open readers and add the new ones to the cache.
for _, closer := range db.closers {
closer.Close()
}
blockClosers := make([]io.Closer, len(loadable))
blockReaders := make([]BlockReader, len(loadable))
for i, b := range loadable {
blockClosers[i] = b
blockReaders[i] = b
}
db.closers = blockClosers
return blockReaders, nil
}
// LastBlockID returns the BlockID of the latest block.
func (db *DBReadOnly) LastBlockID() (string, error) {
entries, err := os.ReadDir(db.dir)
if err != nil {
return "", err
}
maxT := uint64(0)
lastBlockID := ""
for _, e := range entries {
// Check if dir is a block dir or not.
dirName := e.Name()
ulidObj, err := ulid.ParseStrict(dirName)
if err != nil {
continue // Not a block dir.
}
timestamp := ulidObj.Time()
if timestamp > maxT {
maxT = timestamp
lastBlockID = dirName
}
}
if lastBlockID == "" {
return "", errors.New("no blocks found")
}
return lastBlockID, nil
}
// Block returns a block reader for the given block ID.
func (db *DBReadOnly) Block(blockID string, postingsDecoderFactory PostingsDecoderFactory) (BlockReader, error) {
select {
case <-db.closed:
return nil, ErrClosed
default:
}
_, err := os.Stat(filepath.Join(db.dir, blockID))
if os.IsNotExist(err) {
return nil, fmt.Errorf("invalid block ID %s", blockID)
}
block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil, postingsDecoderFactory)
if err != nil {
return nil, err
}
db.closers = append(db.closers, block)
return block, nil
}
// Close all block readers and delete the sandbox dir.
func (db *DBReadOnly) Close() error {
defer func() {
// Delete the temporary sandbox directory that was created when opening the DB.
if err := os.RemoveAll(db.sandboxDir); err != nil {
db.logger.Error("delete sandbox dir", "err", err)
}
}()
select {
case <-db.closed:
return ErrClosed
default:
}
close(db.closed)
return tsdb_errors.CloseAll(db.closers)
}
// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) {
var rngs []int64
opts, rngs = validateOpts(opts, nil)
// Register TSDB features if a registry is provided.
if opts.FeatureRegistry != nil {
opts.FeatureRegistry.Set(features.TSDB, "exemplar_storage", opts.EnableExemplarStorage)
opts.FeatureRegistry.Set(features.TSDB, "delayed_compaction", opts.EnableDelayedCompaction)
opts.FeatureRegistry.Set(features.TSDB, "isolation", !opts.IsolationDisabled)
opts.FeatureRegistry.Set(features.TSDB, "use_uncached_io", opts.UseUncachedIO)
opts.FeatureRegistry.Enable(features.TSDB, "native_histograms")
}
return open(dir, l, r, opts, rngs, stats)
}
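// A hedged usage sketch, not part of the original file: exampleOpenAndQuery
// is a hypothetical helper showing the typical lifecycle around Open: open
// with default options, query a time range, and close both the querier and
// the DB.
func exampleOpenAndQuery(dir string, l *slog.Logger) error {
db, err := Open(dir, l, nil, DefaultOptions(), NewDBStats())
if err != nil {
return err
}
defer db.Close()
// Query samples with timestamps in [0, 1000].
q, err := db.Querier(0, 1000)
if err != nil {
return err
}
return q.Close()
}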
func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
if opts == nil {
opts = DefaultOptions()
}
if opts.StripeSize <= 0 {
opts.StripeSize = DefaultStripeSize
}
if opts.HeadChunksWriteBufferSize <= 0 {
opts.HeadChunksWriteBufferSize = chunks.DefaultWriteBufferSize
}
if opts.HeadChunksWriteQueueSize < 0 {
opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize
}
if opts.SamplesPerChunk <= 0 {
opts.SamplesPerChunk = DefaultSamplesPerChunk
}
if opts.MaxBlockChunkSegmentSize <= 0 {
opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize
}
if opts.MinBlockDuration <= 0 {
opts.MinBlockDuration = DefaultBlockDuration
}
if opts.MinBlockDuration > opts.MaxBlockDuration {
opts.MaxBlockDuration = opts.MinBlockDuration
}
if opts.OutOfOrderCapMax <= 0 {
opts.OutOfOrderCapMax = DefaultOutOfOrderCapMax
}
if opts.OutOfOrderTimeWindow < 0 {
opts.OutOfOrderTimeWindow = 0
}
if opts.BlockReloadInterval < 1*time.Second {
opts.BlockReloadInterval = 1 * time.Second
}
if len(rngs) == 0 {
// Start with the smallest block duration and create exponential buckets until they exceed the
// configured maximum block duration.
rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
}
return opts, rngs
}
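// A minimal sketch, not in the original source, of how the default ranges
// come out: each entry is the previous one multiplied by the step size, and
// open() later drops entries above MaxBlockDuration. exampleDefaultRanges is
// a hypothetical helper.
func exampleDefaultRanges() []int64 {
// With DefaultBlockDuration = 2h this yields 2h, 6h, 18h, ... (10 steps).
return ExponentialBlockRanges(DefaultBlockDuration, 10, 3)
}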
// open returns a new DB in the given directory.
// It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database.
// It is not safe to open more than one DB in the same directory.
func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) {
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
if l == nil {
l = promslog.NewNopLogger()
}
if stats == nil {
stats = NewDBStats()
}
for i, v := range rngs {
if v > opts.MaxBlockDuration {
rngs = rngs[:i]
break
}
}
// Fix up the bad index format written by Prometheus 2.1.
if err := repairBadIndexVersion(l, dir); err != nil {
return nil, fmt.Errorf("repair bad index version: %w", err)
}
walDir := filepath.Join(dir, "wal")
wblDir := filepath.Join(dir, wlog.WblDirName)
for _, tmpDir := range []string{walDir, dir} {
// Remove tmp dirs.
if err := removeBestEffortTmpDirs(l, tmpDir); err != nil {
return nil, fmt.Errorf("remove tmp dirs: %w", err)
}
}
db := &DB{
dir: dir,
logger: l,
opts: opts,
compactc: make(chan struct{}, 1),
donec: make(chan struct{}),
stopc: make(chan struct{}),
autoCompact: true,
chunkPool: chunkenc.NewPool(),
blocksToDelete: opts.BlocksToDelete,
registerer: r,
}
defer func() {
// Close files if startup fails somewhere.
if returnedErr == nil {
return
}
close(db.donec) // DB is never run if it was an error, so close this channel here.
errs := tsdb_errors.NewMulti(returnedErr)
if err := db.Close(); err != nil {
errs.Add(fmt.Errorf("close DB after failed startup: %w", err))
}
returnedErr = errs.Err()
}()
if db.blocksToDelete == nil {
db.blocksToDelete = DefaultBlocksToDelete(db)
}
var err error
db.locker, err = tsdbutil.NewDirLocker(dir, "tsdb", db.logger, r)
if err != nil {
return nil, err
}
if !opts.NoLockfile {
if err := db.locker.Lock(); err != nil {
return nil, err
}
}
ctx, cancel := context.WithCancel(context.Background())
if opts.NewCompactorFunc != nil {
db.compactor, err = opts.NewCompactorFunc(ctx, r, l, rngs, db.chunkPool, opts)
} else {
db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{
MaxBlockChunkSegmentSize: opts.MaxBlockChunkSegmentSize,
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
PD: opts.PostingsDecoderFactory,
UseUncachedIO: opts.UseUncachedIO,
BlockExcludeFilter: opts.BlockCompactionExcludeFunc,
})
}
if err != nil {
cancel()
return nil, fmt.Errorf("create compactor: %w", err)
}
db.compactCancel = cancel
if opts.BlockQuerierFunc == nil {
db.blockQuerierFunc = NewBlockQuerier
} else {
db.blockQuerierFunc = opts.BlockQuerierFunc
}
if opts.BlockChunkQuerierFunc == nil {
db.blockChunkQuerierFunc = NewBlockChunkQuerier
} else {
db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
}
var wal, wbl *wlog.WL
segmentSize := wlog.DefaultSegmentSize
// WAL is enabled.
if opts.WALSegmentSize >= 0 {
// WAL is set to a custom size.
if opts.WALSegmentSize > 0 {
segmentSize = opts.WALSegmentSize
}
wal, err = wlog.NewSize(l, r, walDir, segmentSize, opts.WALCompression)
if err != nil {
return nil, err
}
// Check if there is a WBL on disk, in which case we should replay that data.
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_bench_test.go | tsdb/head_bench_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"errors"
"fmt"
"math/rand"
"strconv"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/util/compression"
)
type benchAppendFunc func(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction
func appendV1Float(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
var err error
app := h.Appender(b.Context())
for _, s := range series {
var ref storage.SeriesRef
for sampleIndex := range samplesPerAppend {
ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex))
require.NoError(b, err)
}
}
return app
}
func appendV2Float(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
var err error
app := h.AppenderV2(b.Context())
for _, s := range series {
var ref storage.SeriesRef
for sampleIndex := range samplesPerAppend {
ref, err = app.Append(ref, s.Labels(), 0, ts+sampleIndex, float64(ts+sampleIndex), nil, nil, storage.AOptions{})
require.NoError(b, err)
}
}
return app
}
func appendV1FloatOrHistogramWithExemplars(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
var err error
app := h.Appender(b.Context())
for i, s := range series {
var ref storage.SeriesRef
for sampleIndex := range samplesPerAppend {
// If i is even, append a sample; otherwise append a histogram.
if i%2 == 0 {
ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex))
require.NoError(b, err)
// Every sample also has an exemplar attached.
_, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
})
require.NoError(b, err)
continue
}
h := &histogram.Histogram{
Count: 7 + uint64(ts*5),
ZeroCount: 2 + uint64(ts),
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
}
ref, err = app.AppendHistogram(ref, s.Labels(), ts, h, nil)
require.NoError(b, err)
// Every histogram sample also has 3 exemplars attached.
_, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
})
require.NoError(b, err)
_, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
})
require.NoError(b, err)
_, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
})
require.NoError(b, err)
}
}
return app
}
func appendV2FloatOrHistogramWithExemplars(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
var (
err error
ex = make([]exemplar.Exemplar, 3)
)
app := h.AppenderV2(b.Context())
for i, s := range series {
var ref storage.SeriesRef
for sampleIndex := range samplesPerAppend {
aOpts := storage.AOptions{Exemplars: ex[:0]}
// If i is even, append a sample; otherwise append a histogram.
if i%2 == 0 {
// Every sample also has an exemplar attached.
aOpts.Exemplars = append(aOpts.Exemplars, exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
})
ref, err = app.Append(ref, s.Labels(), 0, ts, float64(ts), nil, nil, aOpts)
require.NoError(b, err)
continue
}
h := &histogram.Histogram{
Count: 7 + uint64(ts*5),
ZeroCount: 2 + uint64(ts),
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
}
// Every histogram sample also has 3 exemplars attached.
aOpts.Exemplars = append(aOpts.Exemplars,
exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
},
exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
},
exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts + sampleIndex,
},
)
ref, err = app.Append(ref, s.Labels(), 0, ts, 0, h, nil, aOpts)
require.NoError(b, err)
}
}
return app
}
type appendCase struct {
name string
appendFunc benchAppendFunc
}
func appendCases() []appendCase {
return []appendCase{
{
name: "appender=v1/case=floats",
appendFunc: appendV1Float,
},
{
name: "appender=v2/case=floats",
appendFunc: appendV2Float,
},
{
name: "appender=v1/case=floatsHistogramsExemplars",
appendFunc: appendV1FloatOrHistogramWithExemplars,
},
{
name: "appender=v2/case=floatsHistogramsExemplars",
appendFunc: appendV2FloatOrHistogramWithExemplars,
},
}
}
/*
export bench=append && go test \
-run '^$' -bench '^BenchmarkHeadAppender_AppendCommit$' \
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkHeadAppender_AppendCommit(b *testing.B) {
// NOTE(bwplotka): Previously we also had 1k and 10k series cases. There is nothing
// special happening in 100 vs 1k vs 10k, so let's save a considerable amount of
// benchmark time for quicker feedback. In return, we add more sample type cases.
// Similarly, we removed the 2-samples-per-append case.
//
// TODO(bwplotka): This still takes ~6500s (~2h) for -benchtime 5s -count 6 to complete.
// We might want to reduce the time a bit more. 5s is really important as the slowest
// case (appender=v1/case=floatsHistogramsExemplars/series=100/samples_per_append=100-2)
// in 5s yields only 255 iters 23184892 ns/op. Perhaps -benchtime=300x would be better?
seriesCounts := []int{10, 100}
series := genSeries(100, 10, 0, 0) // Only using the generated labels.
for _, appendCase := range appendCases() {
for _, seriesCount := range seriesCounts {
for _, samplesPerAppend := range []int64{1, 5, 100} {
b.Run(fmt.Sprintf("%s/series=%d/samples_per_append=%d", appendCase.name, seriesCount, samplesPerAppend), func(b *testing.B) {
opts := newTestHeadDefaultOptions(10000, false)
opts.EnableExemplarStorage = true // We benchmark with exemplars, so enable storage for them.
h, _ := newTestHeadWithOptions(b, compression.None, opts)
b.Cleanup(func() { require.NoError(b, h.Close()) })
ts := int64(1000)
// Init series; creating them is not what we're benchmarking here.
app := appendCase.appendFunc(b, h, ts, series[:seriesCount], samplesPerAppend)
require.NoError(b, app.Commit())
ts += 1000 // should increment more than highest samplesPerAppend
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
app := appendCase.appendFunc(b, h, ts, series[:seriesCount], samplesPerAppend)
require.NoError(b, app.Commit())
ts += 1000 // should increment more than highest samplesPerAppend
}
})
}
}
}
}
func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
chunkDir := b.TempDir()
// Benchmark creating a new series on every iteration.
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()
for i := 0; b.Loop(); i++ {
h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)), false)
}
}
func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) {
chunkDir := b.TempDir()
// Benchmark creating new series from many goroutines concurrently.
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()
var count atomic.Int64
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
i := count.Inc()
h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(int(i))), false)
}
})
}
func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) {
chunkDir := b.TempDir()
// Benchmark series creation when the PreCreation() callback fails.
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
// Mock the PreCreation() callback to fail on each series.
opts.SeriesCallback = failingSeriesLifecycleCallback{}
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()
for i := 0; b.Loop(); i++ {
h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)), false)
}
}
type failingSeriesLifecycleCallback struct{}
func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") }
func (failingSeriesLifecycleCallback) PostCreation(labels.Labels) {}
func (failingSeriesLifecycleCallback) PostDeletion(map[chunks.HeadSeriesRef]labels.Labels) {}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tsdbblockutil.go | tsdb/tsdbblockutil.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"log/slog"
"path/filepath"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
var ErrInvalidTimes = errors.New("max time is lesser than min time")
// CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk.
func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) {
if chunkRange == 0 {
chunkRange = DefaultBlockDuration
}
if chunkRange < 0 {
return "", ErrInvalidTimes
}
w, err := NewBlockWriter(logger, dir, chunkRange)
if err != nil {
return "", err
}
defer func() {
if err := w.Close(); err != nil {
logger.Error("err closing blockwriter", "err", err.Error())
}
}()
sampleCount := 0
const commitAfter = 10000
ctx := context.Background()
app := w.Appender(ctx)
var it chunkenc.Iterator
for _, s := range series {
ref := storage.SeriesRef(0)
it = s.Iterator(it)
lset := s.Labels()
typ := it.Next()
lastTyp := typ
for ; typ != chunkenc.ValNone; typ = it.Next() {
if lastTyp != typ {
// The behaviour of the appender is undefined if samples of different types
// are appended to the same series in a single Commit().
if err = app.Commit(); err != nil {
return "", err
}
app = w.Appender(ctx)
sampleCount = 0
}
switch typ {
case chunkenc.ValFloat:
t, v := it.At()
ref, err = app.Append(ref, lset, t, v)
case chunkenc.ValHistogram:
t, h := it.AtHistogram(nil)
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
case chunkenc.ValFloatHistogram:
t, fh := it.AtFloatHistogram(nil)
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
default:
return "", fmt.Errorf("unknown sample type %s", typ.String())
}
if err != nil {
return "", err
}
sampleCount++
lastTyp = typ
}
if it.Err() != nil {
return "", it.Err()
}
// Commit and make a new appender periodically, to avoid building up data in memory.
if sampleCount > commitAfter {
if err = app.Commit(); err != nil {
return "", err
}
app = w.Appender(ctx)
sampleCount = 0
}
}
if err = app.Commit(); err != nil {
return "", err
}
ulid, err := w.Flush(ctx)
if err != nil {
return "", err
}
return filepath.Join(dir, ulid.String()), nil
}
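// A hedged usage sketch, not part of the original file: given series already
// materialized in memory, CreateBlock persists them as one block and returns
// its directory. exampleCreateBlock is a hypothetical wrapper; the series
// slice is assumed to be prepared by the caller.
func exampleCreateBlock(series []storage.Series, dir string, logger *slog.Logger) (string, error) {
// Passing 0 for chunkRange falls back to DefaultBlockDuration.
return CreateBlock(series, dir, 0, logger)
}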
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/querier.go | tsdb/querier.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"slices"
"github.com/oklog/ulid/v2"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
)
// checkContextEveryNIterations is used in some tight loops to check if the context is done.
const checkContextEveryNIterations = 100
type blockBaseQuerier struct {
blockID ulid.ULID
index IndexReader
chunks ChunkReader
tombstones tombstones.Reader
closed bool
mint, maxt int64
}
func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, error) {
indexr, err := b.Index()
if err != nil {
return nil, fmt.Errorf("open index reader: %w", err)
}
chunkr, err := b.Chunks()
if err != nil {
indexr.Close()
return nil, fmt.Errorf("open chunk reader: %w", err)
}
tombsr, err := b.Tombstones()
if err != nil {
indexr.Close()
chunkr.Close()
return nil, fmt.Errorf("open tombstone reader: %w", err)
}
if tombsr == nil {
tombsr = tombstones.NewMemTombstones()
}
return &blockBaseQuerier{
blockID: b.Meta().ULID,
mint: mint,
maxt: maxt,
index: indexr,
chunks: chunkr,
tombstones: tombsr,
}, nil
}
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.SortedLabelValues(ctx, name, hints, matchers...)
return res, nil, err
}
func (q *blockBaseQuerier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.LabelNames(ctx, matchers...)
return res, nil, err
}
func (q *blockBaseQuerier) Close() error {
if q.closed {
return errors.New("block querier already closed")
}
errs := tsdb_errors.NewMulti(
q.index.Close(),
q.chunks.Close(),
q.tombstones.Close(),
)
q.closed = true
return errs.Err()
}
type blockQuerier struct {
*blockBaseQuerier
}
// NewBlockQuerier returns a querier against the block reader and requested min and max time range.
func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
q, err := newBlockBaseQuerier(b, mint, maxt)
if err != nil {
return nil, err
}
return &blockQuerier{blockBaseQuerier: q}, nil
}
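// An illustrative sketch, not in the original source: open a standalone
// querier over a single block and iterate the matching series. The
// BlockReader is assumed to come from OpenBlock; exampleQueryBlock is a
// hypothetical helper.
func exampleQueryBlock(ctx context.Context, b BlockReader) error {
q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
if err != nil {
return err
}
defer q.Close()
ss := q.Select(ctx, true, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))
for ss.Next() {
_ = ss.At().Labels() // Inspect each matching series.
}
return ss.Err()
}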
func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.SeriesSet {
disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0
p, err := PostingsForMatchers(ctx, index, ms...)
if err != nil {
return storage.ErrSeriesSet(err)
}
if sharded {
p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
}
if sortSeries {
p = index.SortedPostings(p)
}
if hints != nil {
mint = hints.Start
maxt = hints.End
disableTrimming = hints.DisableTrimming
if hints.Func == "series" {
// When you're only looking up metadata (for example, the series API), you don't need to load any chunks.
return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming)
}
}
return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// blockChunkQuerier provides chunk querying access to a single block database.
type blockChunkQuerier struct {
*blockBaseQuerier
}
// NewBlockChunkQuerier returns a chunk querier against the block reader and requested min and max time range.
func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
q, err := newBlockBaseQuerier(b, mint, maxt)
if err != nil {
return nil, err
}
return &blockChunkQuerier{blockBaseQuerier: q}, nil
}
func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.ChunkSeriesSet {
disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0
if hints != nil {
mint = hints.Start
maxt = hints.End
disableTrimming = hints.DisableTrimming
}
p, err := PostingsForMatchers(ctx, index, ms...)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
if sharded {
p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
}
if sortSeries {
p = index.SortedPostings(p)
}
return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers. The resulting postings are not ordered by series.
func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
if len(ms) == 1 && ms[0].Name == "" && ms[0].Value == "" {
k, v := index.AllPostingsKey()
return ix.Postings(ctx, k, v)
}
var its, notIts []index.Postings
// See which label must be non-empty.
// Optimization for cases like {l=~".", l!="1"}.
labelMustBeSet := make(map[string]bool, len(ms))
for _, m := range ms {
if !m.Matches("") {
labelMustBeSet[m.Name] = true
}
}
isSubtractingMatcher := func(m *labels.Matcher) bool {
if !labelMustBeSet[m.Name] {
return true
}
return (m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp) && m.Matches("")
}
hasSubtractingMatchers, hasIntersectingMatchers := false, false
for _, m := range ms {
if isSubtractingMatcher(m) {
hasSubtractingMatchers = true
} else {
hasIntersectingMatchers = true
}
}
if hasSubtractingMatchers && !hasIntersectingMatchers {
// If there's nothing to subtract from, add in everything and remove the notIts later.
// We prefer to get AllPostings so that the base of subtraction (i.e. allPostings)
// doesn't include series that may be added to the index reader during this function call.
k, v := index.AllPostingsKey()
allPostings, err := ix.Postings(ctx, k, v)
if err != nil {
return nil, err
}
its = append(its, allPostings)
}
// Sort matchers to have the intersecting matchers first.
// This way the base for subtraction is smaller and
// there is no chance that the set we subtract from
// contains postings of series that didn't exist when
// we constructed the set we subtract by.
slices.SortStableFunc(ms, func(i, j *labels.Matcher) int {
if !isSubtractingMatcher(i) && isSubtractingMatcher(j) {
return -1
}
return +1
})
for _, m := range ms {
if ctx.Err() != nil {
return nil, ctx.Err()
}
switch {
case m.Name == "" && m.Value == "":
// We already handled the case at the top of the function,
// and it is unexpected to get all postings again here.
return nil, errors.New("unexpected all postings")
case m.Type == labels.MatchRegexp && m.Value == ".*":
// .* regexp matches any string: do nothing.
case m.Type == labels.MatchNotRegexp && m.Value == ".*":
return index.EmptyPostings(), nil
case m.Type == labels.MatchRegexp && m.Value == ".+":
// .+ regexp matches any non-empty string: get postings for all label values.
it := ix.PostingsForAllLabelValues(ctx, m.Name)
if index.IsEmptyPostingsType(it) {
return index.EmptyPostings(), nil
}
its = append(its, it)
case m.Type == labels.MatchNotRegexp && m.Value == ".+":
// .+ regexp matches any non-empty string: get postings for all label values and remove them.
notIts = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name))
case labelMustBeSet[m.Name]:
// If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("")
isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
switch {
case isNot && matchesEmpty: // l!="foo"
// If the label can't be empty and is a Not and the inner matcher
// doesn't match empty, then subtract it out at the end.
inverse, err := m.Inverse()
if err != nil {
return nil, err
}
it, err := postingsForMatcher(ctx, ix, inverse)
if err != nil {
return nil, err
}
notIts = append(notIts, it)
case isNot && !matchesEmpty: // l!=""
// If the label can't be empty and is a Not, but the inner matcher can
// be empty we need to use inversePostingsForMatcher.
inverse, err := m.Inverse()
if err != nil {
return nil, err
}
it, err := inversePostingsForMatcher(ctx, ix, inverse)
if err != nil {
return nil, err
}
if index.IsEmptyPostingsType(it) {
return index.EmptyPostings(), nil
}
its = append(its, it)
default: // l="a", l=~"a|b", l=~"a.b", etc.
// Non-Not matcher, use normal postingsForMatcher.
it, err := postingsForMatcher(ctx, ix, m)
if err != nil {
return nil, err
}
if index.IsEmptyPostingsType(it) {
return index.EmptyPostings(), nil
}
its = append(its, it)
}
default: // l=""
// If the matcher for a label name selects an empty value, it selects all
// the series which don't have the label name set too. See:
// https://github.com/prometheus/prometheus/issues/3575 and
// https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
it, err := inversePostingsForMatcher(ctx, ix, m)
if err != nil {
return nil, err
}
notIts = append(notIts, it)
}
}
it := index.Intersect(its...)
for _, n := range notIts {
it = index.Without(it, n)
}
return it, nil
}
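// An illustrative sketch, not part of the original file: for a selector like
// {job="a", env!="prod"}, PostingsForMatchers intersects the postings for
// job="a" and subtracts the postings for env="prod".
// examplePostingsForSelector is a hypothetical helper.
func examplePostingsForSelector(ctx context.Context, ix IndexReader) (index.Postings, error) {
return PostingsForMatchers(ctx, ix,
labels.MustNewMatcher(labels.MatchEqual, "job", "a"),
labels.MustNewMatcher(labels.MatchNotEqual, "env", "prod"),
)
}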
func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
// This method will not return postings for missing labels.
// Fast-path for equal matching.
if m.Type == labels.MatchEqual {
return ix.Postings(ctx, m.Name, m.Value)
}
// Fast-path for set matching.
if m.Type == labels.MatchRegexp {
setMatches := m.SetMatches()
if len(setMatches) > 0 {
return ix.Postings(ctx, m.Name, setMatches...)
}
}
it := ix.PostingsForLabelMatching(ctx, m.Name, m.Matches)
return it, it.Err()
}
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
// Fast-path for MatchNotRegexp matching.
// Inverse of a MatchNotRegexp is MatchRegexp (double negation).
// Fast-path for set matching.
if m.Type == labels.MatchNotRegexp {
setMatches := m.SetMatches()
if len(setMatches) > 0 {
return ix.Postings(ctx, m.Name, setMatches...)
}
}
// Fast-path for MatchNotEqual matching.
// Inverse of a MatchNotEqual is MatchEqual (double negation).
if m.Type == labels.MatchNotEqual {
return ix.Postings(ctx, m.Name, m.Value)
}
// If the matcher being inverted is =~"" or ="", we just want all the values.
if m.Value == "" && (m.Type == labels.MatchRegexp || m.Type == labels.MatchEqual) {
it := ix.PostingsForAllLabelValues(ctx, m.Name)
return it, it.Err()
}
it := ix.PostingsForLabelMatching(ctx, m.Name, func(s string) bool {
return !m.Matches(s)
})
return it, it.Err()
}
func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
// Limit is applied at the end, after filtering.
allValues, err := r.LabelValues(ctx, name, nil)
if err != nil {
return nil, fmt.Errorf("fetching values of label %s: %w", name, err)
}
// If we have a matcher for the label name, we can filter out values that don't match
// before we fetch postings. This is especially useful for labels with many values.
// e.g. __name__ with a selector like {__name__="xyz"}
hasMatchersForOtherLabels := false
for _, m := range matchers {
if m.Name != name {
hasMatchersForOtherLabels = true
continue
}
// Re-use the allValues slice to avoid allocations.
// This is safe because the iteration is always ahead of the append.
filteredValues := allValues[:0]
count := 1
for _, v := range allValues {
if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
return nil, ctx.Err()
}
count++
if m.Matches(v) {
filteredValues = append(filteredValues, v)
}
}
allValues = filteredValues
}
if len(allValues) == 0 {
return nil, nil
}
// If we don't have any matchers for other labels, then we're done.
if !hasMatchersForOtherLabels {
if hints != nil && hints.Limit > 0 && len(allValues) > hints.Limit {
allValues = allValues[:hints.Limit]
}
return allValues, nil
}
p, err := PostingsForMatchers(ctx, r, matchers...)
if err != nil {
return nil, fmt.Errorf("fetching postings for matchers: %w", err)
}
valuesPostings := make([]index.Postings, len(allValues))
for i, value := range allValues {
valuesPostings[i], err = r.Postings(ctx, name, value)
if err != nil {
return nil, fmt.Errorf("fetching postings for %s=%q: %w", name, value, err)
}
}
indexes, err := index.FindIntersectingPostings(p, valuesPostings)
if err != nil {
return nil, fmt.Errorf("intersecting postings: %w", err)
}
values := make([]string, 0, len(indexes))
for _, idx := range indexes {
values = append(values, allValues[idx])
if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit {
break
}
}
return values, nil
}
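// A hedged usage sketch, not in the original source: fetch the values of
// "instance" restricted to series matching job="api", with the result size
// capped via LabelHints. exampleLabelValues is a hypothetical helper.
func exampleLabelValues(ctx context.Context, r IndexReader) ([]string, error) {
hints := &storage.LabelHints{Limit: 100}
return labelValuesWithMatchers(ctx, r, "instance", hints,
labels.MustNewMatcher(labels.MatchEqual, "job", "api"))
}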
func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*labels.Matcher) ([]string, error) {
p, err := PostingsForMatchers(ctx, r, matchers...)
if err != nil {
return nil, err
}
return r.LabelNamesFor(ctx, p)
}
// seriesData, used inside other iterators, is updated when we move from one series to another.
type seriesData struct {
chks []chunks.Meta
intervals tombstones.Intervals
labels labels.Labels
}
// Labels implements part of storage.Series and storage.ChunkSeries.
func (s *seriesData) Labels() labels.Labels { return s.labels }
// blockBaseSeriesSet allows iterating over all series in a single block.
// Iterated series are trimmed with given min and max time as well as tombstones.
// See newBlockSeriesSet and NewBlockChunkSeriesSet to use it for either sample or chunk iterating.
type blockBaseSeriesSet struct {
blockID ulid.ULID
p index.Postings
index IndexReader
chunks ChunkReader
tombstones tombstones.Reader
mint, maxt int64
disableTrimming bool
curr seriesData
bufChks []chunks.Meta
builder labels.ScratchBuilder
err error
}
func (b *blockBaseSeriesSet) Next() bool {
for b.p.Next() {
if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil {
// Postings may be stale. Skip if no underlying series exists.
if errors.Is(err, storage.ErrNotFound) {
continue
}
b.err = fmt.Errorf("get series %d: %w", b.p.At(), err)
return false
}
if len(b.bufChks) == 0 {
continue
}
intervals, err := b.tombstones.Get(b.p.At())
if err != nil {
b.err = fmt.Errorf("get tombstones: %w", err)
return false
}
// NOTE:
// * block time range is half-open: [meta.MinTime, meta.MaxTime).
// * chunks are both closed: [chk.MinTime, chk.MaxTime].
// * requested time ranges are closed: [req.Start, req.End].
var trimFront, trimBack bool
// Copy chunks as iterables are reusable.
// Count those in range to size allocation (roughly - ignoring tombstones).
nChks := 0
for _, chk := range b.bufChks {
if chk.MaxTime >= b.mint && chk.MinTime <= b.maxt {
nChks++
}
}
chks := make([]chunks.Meta, 0, nChks)
// Prefilter chunks and pick those which are not entirely deleted or totally outside of the requested range.
for _, chk := range b.bufChks {
if chk.MaxTime < b.mint {
continue
}
if chk.MinTime > b.maxt {
continue
}
if (tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) {
continue
}
chks = append(chks, chk)
// If still not entirely deleted, check if trim is needed based on requested time range.
if !b.disableTrimming {
if chk.MinTime < b.mint {
trimFront = true
}
if chk.MaxTime > b.maxt {
trimBack = true
}
}
}
if len(chks) == 0 {
continue
}
if trimFront {
intervals = intervals.Add(tombstones.Interval{Mint: math.MinInt64, Maxt: b.mint - 1})
}
if trimBack {
intervals = intervals.Add(tombstones.Interval{Mint: b.maxt + 1, Maxt: math.MaxInt64})
}
b.curr.labels = b.builder.Labels()
b.curr.chks = chks
b.curr.intervals = intervals
return true
}
return false
}
func (b *blockBaseSeriesSet) Err() error {
if b.err != nil {
return b.err
}
return b.p.Err()
}
func (*blockBaseSeriesSet) Warnings() annotations.Annotations { return nil }
// populateWithDelGenericSeriesIterator allows iterating over the given chunk
// metas. In each iteration it ensures that chunks are trimmed based on the
// given tombstone intervals, if any.
//
// populateWithDelGenericSeriesIterator assumes that chunks that would be fully
// removed by intervals are filtered out in previous phase.
//
// On each iteration currMeta is available. If currDelIter is not nil, it
// means that the chunk in currMeta is invalid and a chunk rewrite is needed,
// for which currDelIter should be used.
type populateWithDelGenericSeriesIterator struct {
blockID ulid.ULID
cr ChunkReader
// metas are expected to be sorted by minTime and should be related to
// the same, single series.
// It's possible for a single chunks.Meta to refer to multiple chunks.
// cr.ChunkOrIterable() would return an iterable and a nil chunk in this
// case.
metas []chunks.Meta
i int // Index into metas; -1 if not started yet.
err error
bufIter DeletedIterator // Retained for memory re-use. currDelIter may point here.
intervals tombstones.Intervals
currDelIter chunkenc.Iterator
// currMeta is the current chunks.Meta from metas. currMeta.Chunk is set to
// the chunk returned from cr.ChunkOrIterable(). As that can return a nil
// chunk, currMeta.Chunk is not always guaranteed to be set.
currMeta chunks.Meta
}
func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
p.blockID = blockID
p.cr = cr
p.metas = chks
p.i = -1
p.err = nil
// Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next().
p.bufIter.Intervals = p.bufIter.Intervals[:0]
p.intervals = intervals
p.currDelIter = nil
p.currMeta = chunks.Meta{}
}
// If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)
// is deep copied to avoid races between reads and copying chunk bytes.
// However, if the deletion intervals overlap with the head chunk, then the head chunk is
// not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
if p.err != nil || p.i >= len(p.metas)-1 {
return false
}
p.i++
p.currMeta = p.metas[p.i]
p.bufIter.Intervals = p.bufIter.Intervals[:0]
for _, interval := range p.intervals {
if p.currMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
p.bufIter.Intervals = p.bufIter.Intervals.Add(interval)
}
}
hcr, ok := p.cr.(ChunkReaderWithCopy)
var iterable chunkenc.Iterable
if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
// ChunkOrIterableWithCopy will copy the head chunk, if it can.
var maxt int64
p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta)
if p.currMeta.Chunk != nil {
// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
p.currMeta.MaxTime = maxt
}
} else {
p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
}
if p.err != nil {
p.err = fmt.Errorf("cannot populate chunk %d from block %s: %w", p.currMeta.Ref, p.blockID.String(), p.err)
return false
}
// Use the single chunk if possible.
if p.currMeta.Chunk != nil {
if len(p.bufIter.Intervals) == 0 {
// If there is no overlap with deletion intervals and a single chunk is
// returned, we can take the chunk as it is.
p.currDelIter = nil
return true
}
// Otherwise we need to iterate over the samples in the single chunk
// and create new chunks.
p.bufIter.Iter = p.currMeta.Chunk.Iterator(p.bufIter.Iter)
p.currDelIter = &p.bufIter
return true
}
// Otherwise, use the iterable to create an iterator.
p.bufIter.Iter = iterable.Iterator(p.bufIter.Iter)
p.currDelIter = &p.bufIter
return true
}
func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err }
type blockSeriesEntry struct {
chunks ChunkReader
blockID ulid.ULID
seriesData
}
func (s *blockSeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
pi, ok := it.(*populateWithDelSeriesIterator)
if !ok {
pi = &populateWithDelSeriesIterator{}
}
pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
return pi
}
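// A hedged sketch, not part of the original file, of the iterator-reuse idiom
// used above: callers pass the previous iterator back into Iterator so its
// buffers can be recycled. exampleIterateSeries is a hypothetical helper.
func exampleIterateSeries(s storage.Series) error {
var it chunkenc.Iterator
it = s.Iterator(it) // Reuses `it` when possible; allocates otherwise.
for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
if typ == chunkenc.ValFloat {
_, _ = it.At() // Timestamp and float value.
}
}
return it.Err()
}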
type chunkSeriesEntry struct {
chunks ChunkReader
blockID ulid.ULID
seriesData
}
func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator {
pi, ok := it.(*populateWithDelChunkSeriesIterator)
if !ok {
pi = &populateWithDelChunkSeriesIterator{}
}
pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
return pi
}
// populateWithDelSeriesIterator allows iterating over samples for a single series.
type populateWithDelSeriesIterator struct {
populateWithDelGenericSeriesIterator
curr chunkenc.Iterator
}
func (p *populateWithDelSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
p.curr = nil
}
func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
if p.curr != nil {
if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
return valueType
}
}
for p.next(false) {
if p.currDelIter != nil {
p.curr = p.currDelIter
} else {
p.curr = p.currMeta.Chunk.Iterator(p.curr)
}
if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
return valueType
}
}
return chunkenc.ValNone
}
func (p *populateWithDelSeriesIterator) Seek(t int64) chunkenc.ValueType {
if p.curr != nil {
if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
return valueType
}
}
for p.Next() != chunkenc.ValNone {
if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
return valueType
}
}
return chunkenc.ValNone
}
func (p *populateWithDelSeriesIterator) At() (int64, float64) {
return p.curr.At()
}
func (p *populateWithDelSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
return p.curr.AtHistogram(h)
}
func (p *populateWithDelSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
return p.curr.AtFloatHistogram(fh)
}
func (p *populateWithDelSeriesIterator) AtT() int64 {
return p.curr.AtT()
}
func (p *populateWithDelSeriesIterator) Err() error {
if err := p.populateWithDelGenericSeriesIterator.Err(); err != nil {
return err
}
if p.curr != nil {
return p.curr.Err()
}
return nil
}
type populateWithDelChunkSeriesIterator struct {
populateWithDelGenericSeriesIterator
// currMetaWithChunk is current meta with its chunk field set. This meta
// is guaranteed to map to a single chunk. This differs from
// populateWithDelGenericSeriesIterator.currMeta as that
// could refer to multiple chunks.
currMetaWithChunk chunks.Meta
// chunksFromIterable stores the chunks created from iterating through
// the iterable returned by cr.ChunkOrIterable() (with deleted samples
// removed).
chunksFromIterable []chunks.Meta
chunksFromIterableIdx int
}
func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
p.currMetaWithChunk = chunks.Meta{}
p.chunksFromIterable = p.chunksFromIterable[:0]
p.chunksFromIterableIdx = -1
}
func (p *populateWithDelChunkSeriesIterator) Next() bool {
if p.currMeta.Chunk == nil {
// If we've been creating chunks from the iterable, check if there are
// any more chunks to iterate through.
if p.chunksFromIterableIdx < len(p.chunksFromIterable)-1 {
p.chunksFromIterableIdx++
p.currMetaWithChunk = p.chunksFromIterable[p.chunksFromIterableIdx]
return true
}
}
// Move to the next chunk/deletion iterator.
// This is a for loop because, if the current p.currDelIter returns no samples
// (which means a chunk won't be created), there still might be more
// samples/chunks from the rest of p.metas.
for p.next(true) {
if p.currDelIter == nil {
p.currMetaWithChunk = p.currMeta
return true
}
if p.currMeta.Chunk != nil {
// If ChunkOrIterable() returned a non-nil chunk, the samples in
// p.currDelIter will only form one chunk, as the only change
// p.currDelIter might make is deleting some samples.
if p.populateCurrForSingleChunk() {
return true
}
} else {
// If ChunkOrIterable() returned an iterable, multiple chunks may be
// created from the samples in p.currDelIter.
if p.populateChunksFromIterable() {
return true
}
}
}
return false
}
// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This
// should be called if the samples in p.currDelIter only form one chunk.
func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
valueType := p.currDelIter.Next()
if valueType == chunkenc.ValNone {
if err := p.currDelIter.Err(); err != nil {
p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
}
return false
}
p.currMetaWithChunk.MinTime = p.currDelIter.AtT()
// Re-encode the chunk if an iterator is provided. This means that it has
// some samples to be deleted or the chunk is still open.
var (
newChunk chunkenc.Chunk
app chunkenc.Appender
t int64
err error
)
switch valueType {
case chunkenc.ValHistogram:
newChunk = chunkenc.NewHistogramChunk()
if app, err = newChunk.Appender(); err != nil {
break
}
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValHistogram {
err = fmt.Errorf("found value type %v in histogram chunk", vt)
break
}
var h *histogram.Histogram
t, h = p.currDelIter.AtHistogram(nil)
_, _, app, err = app.AppendHistogram(nil, t, h, true)
if err != nil {
break
}
}
case chunkenc.ValFloat:
newChunk = chunkenc.NewXORChunk()
if app, err = newChunk.Appender(); err != nil {
break
}
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValFloat {
err = fmt.Errorf("found value type %v in float chunk", vt)
break
}
var v float64
t, v = p.currDelIter.At()
app.Append(t, v)
}
case chunkenc.ValFloatHistogram:
newChunk = chunkenc.NewFloatHistogramChunk()
if app, err = newChunk.Appender(); err != nil {
break
}
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValFloatHistogram {
err = fmt.Errorf("found value type %v in histogram chunk", vt)
break
}
var h *histogram.FloatHistogram
t, h = p.currDelIter.AtFloatHistogram(nil)
_, _, app, err = app.AppendFloatHistogram(nil, t, h, true)
if err != nil {
break
}
}
default:
err = fmt.Errorf("populateCurrForSingleChunk: value type %v unsupported", valueType)
}
if err != nil {
p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
return false
}
if err := p.currDelIter.Err(); err != nil {
p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
return false
}
p.currMetaWithChunk.Chunk = newChunk
p.currMetaWithChunk.MaxTime = t
return true
}
// populateChunksFromIterable reads the samples from currDelIter to create
// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first
// chunk.
func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
p.chunksFromIterable = p.chunksFromIterable[:0]
p.chunksFromIterableIdx = -1
firstValueType := p.currDelIter.Next()
if firstValueType == chunkenc.ValNone {
if err := p.currDelIter.Err(); err != nil {
p.err = fmt.Errorf("populateChunksFromIterable: no samples could be read: %w", err)
return false
}
return false
}
var (
// t is the timestamp for the current sample.
t int64
cmint int64
cmaxt int64
currentChunk chunkenc.Chunk
app chunkenc.Appender
newChunk chunkenc.Chunk
recoded bool
err error
)
prevValueType := chunkenc.ValNone
for currentValueType := firstValueType; currentValueType != chunkenc.ValNone; currentValueType = p.currDelIter.Next() {
// Check if the encoding has changed (i.e. we need to create a new
// chunk as chunks can't have multiple encoding types).
// For the first sample, the following condition will always be true as
// ValNone != ValFloat | ValHistogram | ValFloatHistogram.
if currentValueType != prevValueType {
if prevValueType != chunkenc.ValNone {
p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
}
cmint = p.currDelIter.AtT()
if currentChunk, err = currentValueType.NewChunk(); err != nil {
break
}
if app, err = currentChunk.Appender(); err != nil {
break
}
}
switch currentValueType {
case chunkenc.ValFloat:
{
var v float64
t, v = p.currDelIter.At()
app.Append(t, v)
}
case chunkenc.ValHistogram:
{
var v *histogram.Histogram
t, v = p.currDelIter.AtHistogram(nil)
// No need to set prevApp as AppendHistogram will set the
// counter reset header for the appender that's returned.
newChunk, recoded, app, err = app.AppendHistogram(nil, t, v, false)
}
case chunkenc.ValFloatHistogram:
{
var v *histogram.FloatHistogram
t, v = p.currDelIter.AtFloatHistogram(nil)
// No need to set prevApp as AppendHistogram will set the
// counter reset header for the appender that's returned.
newChunk, recoded, app, err = app.AppendFloatHistogram(nil, t, v, false)
}
}
if err != nil {
break
}
if newChunk != nil {
if !recoded {
p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
cmint = t
}
currentChunk = newChunk
}
cmaxt = t
prevValueType = currentValueType
}
if err != nil {
p.err = fmt.Errorf("populateChunksFromIterable: error when writing new chunks: %w", err)
return false
}
if err = p.currDelIter.Err(); err != nil {
p.err = fmt.Errorf("populateChunksFromIterable: currDelIter error when writing new chunks: %w", err)
return false
}
if prevValueType != chunkenc.ValNone {
p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
}
if len(p.chunksFromIterable) == 0 {
return false
}
p.currMetaWithChunk = p.chunksFromIterable[0]
p.chunksFromIterableIdx = 0
return true
}
func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.currMetaWithChunk }
// blockSeriesSet allows iterating over sorted, populated series with tombstones applied.
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/isolation.go | tsdb/isolation.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"math"
"sync"
)
// isolationState holds the isolation information.
type isolationState struct {
// We will ignore all appends above the max, or that are incomplete.
maxAppendID uint64
incompleteAppends map[uint64]struct{}
lowWatermark uint64 // Lowest of incompleteAppends/maxAppendID.
isolation *isolation
mint, maxt int64 // Time ranges of the read.
// Doubly linked list of active reads.
next *isolationState
prev *isolationState
}
// Close closes the state.
func (i *isolationState) Close() {
i.isolation.readMtx.Lock()
defer i.isolation.readMtx.Unlock()
i.next.prev = i.prev
i.prev.next = i.next
}
func (i *isolationState) IsolationDisabled() bool {
return i.isolation.disabled
}
type isolationAppender struct {
appendID uint64
minTime int64
prev *isolationAppender
next *isolationAppender
}
// isolation is the global isolation state.
type isolation struct {
// Mutex for accessing lastAppendID and appendsOpen.
appendMtx sync.RWMutex
// Which appends are currently in progress.
appendsOpen map[uint64]*isolationAppender
// New appenders with a higher appendID are added to the end. The first element keeps lastAppendID.
// appendsOpenList.next points to the first element and appendsOpenList.prev points to the last element.
// If there are no appenders, both point back to appendsOpenList.
appendsOpenList *isolationAppender
// Pool of reusable *isolationAppender to save on allocations.
appendersPool sync.Pool
// Mutex for accessing readsOpen.
// If taking both appendMtx and readMtx, take appendMtx first.
readMtx sync.RWMutex
// All isolationStates currently in use. This is a doubly-linked list.
readsOpen *isolationState
// If true, writes are not tracked while reads are still tracked.
disabled bool
}
func newIsolation(disabled bool) *isolation {
isoState := &isolationState{}
isoState.next = isoState
isoState.prev = isoState
appender := &isolationAppender{}
appender.next = appender
appender.prev = appender
return &isolation{
appendsOpen: map[uint64]*isolationAppender{},
appendsOpenList: appender,
readsOpen: isoState,
disabled: disabled,
appendersPool: sync.Pool{New: func() any { return &isolationAppender{} }},
}
}
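// A minimal illustrative sketch, not part of the original file, of the
// append/read lifecycle: writers obtain an appendID, readers snapshot the
// in-flight appends via State, and both sides release their handles when
// done. exampleIsolationLifecycle is a hypothetical helper.
func exampleIsolationLifecycle() {
iso := newIsolation(false)
appendID, _ := iso.newAppendID(0) // Start a write; the second value is the low watermark.
state := iso.State(math.MinInt64, math.MaxInt64)
_ = state.incompleteAppends // The read will ignore this in-flight append.
iso.closeAppend(appendID)
state.Close()
}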
// lowWatermark returns the appendID below which we no longer need to track
// which appends were from which appendID.
func (i *isolation) lowWatermark() uint64 {
if i.disabled {
return 0
}
i.appendMtx.RLock() // Take appendMtx first.
defer i.appendMtx.RUnlock()
return i.lowWatermarkLocked()
}
func (i *isolation) lowWatermarkLocked() uint64 {
if i.disabled {
return 0
}
i.readMtx.RLock()
defer i.readMtx.RUnlock()
if i.readsOpen.prev != i.readsOpen {
return i.readsOpen.prev.lowWatermark
}
// Lowest appendID from appenders, or lastAppendId.
return i.appendsOpenList.next.appendID
}
// lowestAppendTime returns the lowest minTime for any open appender,
// or math.MaxInt64 if no open appenders.
func (i *isolation) lowestAppendTime() int64 {
var lowest int64 = math.MaxInt64
i.appendMtx.RLock()
defer i.appendMtx.RUnlock()
for a := i.appendsOpenList.next; a != i.appendsOpenList; a = a.next {
if lowest > a.minTime {
lowest = a.minTime
}
}
return lowest
}
// State returns an object used to control isolation
// between a query and appends. Must be closed when complete.
func (i *isolation) State(mint, maxt int64) *isolationState {
i.appendMtx.RLock() // Take append mutex before read mutex.
defer i.appendMtx.RUnlock()
// We need to track reads even when isolation is disabled, so that head
// truncation can wait till reads overlapping that range have finished.
isoState := &isolationState{
maxAppendID: i.appendsOpenList.appendID,
lowWatermark: i.appendsOpenList.next.appendID, // Lowest appendID from appenders, or lastAppendId.
incompleteAppends: make(map[uint64]struct{}, len(i.appendsOpen)),
isolation: i,
mint: mint,
maxt: maxt,
}
for k := range i.appendsOpen {
isoState.incompleteAppends[k] = struct{}{}
}
i.readMtx.Lock()
defer i.readMtx.Unlock()
isoState.prev = i.readsOpen
isoState.next = i.readsOpen.next
i.readsOpen.next.prev = isoState
i.readsOpen.next = isoState
return isoState
}
// TraverseOpenReads iterates through the open reads and runs the given
// function on those states. The given function MUST NOT mutate the isolationState.
// The iteration is stopped when the function returns false or once all reads have been iterated.
func (i *isolation) TraverseOpenReads(f func(s *isolationState) bool) {
i.readMtx.RLock()
defer i.readMtx.RUnlock()
s := i.readsOpen.next
for s != i.readsOpen {
if !f(s) {
return
}
s = s.next
}
}
// newAppendID increments the transaction counter and returns a new transaction
// ID. The first ID returned is 1.
// Also returns the low watermark, to keep lock/unlock operations down.
func (i *isolation) newAppendID(minTime int64) (uint64, uint64) {
if i.disabled {
return 0, 0
}
i.appendMtx.Lock()
defer i.appendMtx.Unlock()
// Last used appendID is stored in head element.
i.appendsOpenList.appendID++
app := i.appendersPool.Get().(*isolationAppender)
app.appendID = i.appendsOpenList.appendID
app.minTime = minTime
app.prev = i.appendsOpenList.prev
app.next = i.appendsOpenList
i.appendsOpenList.prev.next = app
i.appendsOpenList.prev = app
i.appendsOpen[app.appendID] = app
return app.appendID, i.lowWatermarkLocked()
}
func (i *isolation) lastAppendID() uint64 {
if i.disabled {
return 0
}
i.appendMtx.RLock()
defer i.appendMtx.RUnlock()
return i.appendsOpenList.appendID
}
func (i *isolation) closeAppend(appendID uint64) {
if i.disabled {
return
}
i.appendMtx.Lock()
defer i.appendMtx.Unlock()
app := i.appendsOpen[appendID]
if app != nil {
app.prev.next = app.next
app.next.prev = app.prev
delete(i.appendsOpen, appendID)
// Clear all fields, and return to the pool.
*app = isolationAppender{}
i.appendersPool.Put(app)
}
}
// The transactionID ring buffer.
type txRing struct {
txIDs []uint64
txIDFirst uint32 // Position of the first id in the ring.
txIDCount uint32 // How many ids in the ring.
}
func newTxRing(capacity int) *txRing {
return &txRing{
txIDs: make([]uint64, capacity),
}
}
func (txr *txRing) add(appendID uint64) {
if int(txr.txIDCount) == len(txr.txIDs) {
// Ring buffer is full, expand by doubling.
newLen := txr.txIDCount * 2
if newLen == 0 {
newLen = 4
}
newRing := make([]uint64, newLen)
idx := copy(newRing, txr.txIDs[txr.txIDFirst:])
copy(newRing[idx:], txr.txIDs[:txr.txIDFirst])
txr.txIDs = newRing
txr.txIDFirst = 0
}
txr.txIDs[int(txr.txIDFirst+txr.txIDCount)%len(txr.txIDs)] = appendID
txr.txIDCount++
}
func (txr *txRing) cleanupAppendIDsBelow(bound uint64) {
if len(txr.txIDs) == 0 {
return
}
pos := int(txr.txIDFirst)
for txr.txIDCount > 0 {
if txr.txIDs[pos] >= bound {
break
}
txr.txIDFirst++
txr.txIDCount--
pos++
if pos == len(txr.txIDs) {
pos = 0
}
}
txr.txIDFirst %= uint32(len(txr.txIDs))
}
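// A hedged sketch, not in the original source, of the ring semantics: the
// buffer doubles in capacity when full, preserving insertion order, and
// cleanupAppendIDsBelow drops IDs from the front. exampleTxRing is a
// hypothetical helper.
func exampleTxRing() {
r := newTxRing(2)
r.add(1)
r.add(2)
r.add(3)                   // Full ring triggers growth from 2 to 4 slots.
r.cleanupAppendIDsBelow(3) // Drops IDs 1 and 2; ID 3 remains.
}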
func (txr *txRing) iterator() *txRingIterator {
return &txRingIterator{
pos: txr.txIDFirst,
ids: txr.txIDs,
}
}
// txRingIterator lets you iterate over the ring. Note that it never
// terminates; the caller must bound the iteration.
type txRingIterator struct {
ids []uint64
pos uint32
}
func (it *txRingIterator) At() uint64 {
return it.ids[it.pos]
}
func (it *txRingIterator) Next() {
it.pos++
if int(it.pos) == len(it.ids) {
it.pos = 0
}
}
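// Illustrative lifecycle of the ring (a sketch, not part of the package API;
// all identifiers below are defined in this file):
//
//	txr := newTxRing(4)
//	txr.add(1)
//	txr.add(2)
//	txr.add(3)
//	txr.cleanupAppendIDsBelow(3) // Drops IDs 1 and 2; only ID 3 stays buffered.
//	it := txr.iterator()
//	first := it.At() // 3
//	it.Next()        // Wraps around; the caller must bound iteration by txIDCount.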
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/compact_test.go | tsdb/compact_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"sync"
"testing"
"time"
"github.com/oklog/ulid/v2"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/compression"
)
func TestSplitByRange(t *testing.T) {
cases := []struct {
trange int64
ranges [][2]int64
output [][][2]int64
}{
{
trange: 60,
ranges: [][2]int64{{0, 10}},
output: [][][2]int64{
{{0, 10}},
},
},
{
trange: 60,
ranges: [][2]int64{{0, 60}},
output: [][][2]int64{
{{0, 60}},
},
},
{
trange: 60,
ranges: [][2]int64{{0, 10}, {9, 15}, {30, 60}},
output: [][][2]int64{
{{0, 10}, {9, 15}, {30, 60}},
},
},
{
trange: 60,
ranges: [][2]int64{{70, 90}, {125, 130}, {130, 180}, {1000, 1001}},
output: [][][2]int64{
{{70, 90}},
{{125, 130}, {130, 180}},
{{1000, 1001}},
},
},
// Mis-aligned or too-large blocks are ignored.
{
trange: 60,
ranges: [][2]int64{{50, 70}, {70, 80}},
output: [][][2]int64{
{{70, 80}},
},
},
{
trange: 72,
ranges: [][2]int64{{0, 144}, {144, 216}, {216, 288}},
output: [][][2]int64{
{{144, 216}},
{{216, 288}},
},
},
// Various awkward edge cases easy to hit with negative numbers.
{
trange: 60,
ranges: [][2]int64{{-10, -5}},
output: [][][2]int64{
{{-10, -5}},
},
},
{
trange: 60,
ranges: [][2]int64{{-60, -50}, {-10, -5}},
output: [][][2]int64{
{{-60, -50}, {-10, -5}},
},
},
{
trange: 60,
ranges: [][2]int64{{-60, -50}, {-10, -5}, {0, 15}},
output: [][][2]int64{
{{-60, -50}, {-10, -5}},
{{0, 15}},
},
},
}
for _, c := range cases {
// Transform input range tuples into dirMetas.
blocks := make([]dirMeta, 0, len(c.ranges))
for _, r := range c.ranges {
blocks = append(blocks, dirMeta{
meta: &BlockMeta{
MinTime: r[0],
MaxTime: r[1],
},
})
}
// Transform output range tuples into dirMetas.
exp := make([][]dirMeta, len(c.output))
for i, group := range c.output {
for _, r := range group {
exp[i] = append(exp[i], dirMeta{
meta: &BlockMeta{MinTime: r[0], MaxTime: r[1]},
})
}
}
require.Equal(t, exp, splitByRange(blocks, c.trange))
}
}
// See https://github.com/prometheus/prometheus/issues/3064
func TestNoPanicFor0Tombstones(t *testing.T) {
metas := []dirMeta{
{
dir: "1",
meta: &BlockMeta{
MinTime: 0,
MaxTime: 100,
},
},
{
dir: "2",
meta: &BlockMeta{
MinTime: 101,
MaxTime: 200,
},
},
}
c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil, nil)
require.NoError(t, err)
c.plan(metas)
}
func TestLeveledCompactor_plan(t *testing.T) {
// This mimics our default ExponentialBlockRanges with min block size equal to 20.
compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
20,
60,
180,
540,
1620,
}, nil, nil)
require.NoError(t, err)
cases := map[string]struct {
metas []dirMeta
expected []string
}{
"Outside Range": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
},
expected: nil,
},
"We should wait for four blocks of size 20 to appear before compacting.": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
},
expected: nil,
},
`We should wait for the next block of size 20 to appear before compacting
the existing ones. We have three, but we ignore the fresh one from the WAL`: {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 40, 60, nil),
},
expected: nil,
},
"Block to fill the entire parent range appeared – should be compacted": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 40, 60, nil),
metaRange("4", 60, 80, nil),
},
expected: []string{"1", "2", "3"},
},
`Block for the next parent range appeared with a gap of size 20. Nothing will happen in the first range
anymore, but we still ignore the fresh one, so no compaction`: {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 60, 80, nil),
},
expected: nil,
},
`Block for the next parent range appeared, and we have a gap of size 20 between the second and third blocks.
This missed gap will never be filled anymore, so we should compact just these two.`: {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 60, 80, nil),
metaRange("4", 80, 100, nil),
},
expected: []string{"1", "2"},
},
"We have 20, 20, 20, 60, 60 range blocks. '5' is marked as fresh one": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 40, 60, nil),
metaRange("4", 60, 120, nil),
metaRange("5", 120, 180, nil),
},
expected: []string{"1", "2", "3"},
},
"We have 20, 60, 20, 60, 240 range blocks. We can compact 20 + 60 + 60": {
metas: []dirMeta{
metaRange("2", 20, 40, nil),
metaRange("4", 60, 120, nil),
metaRange("5", 960, 980, nil), // Fresh one.
metaRange("6", 120, 180, nil),
metaRange("7", 720, 960, nil),
},
expected: []string{"2", "4", "6"},
},
"Do not select large blocks that have many tombstones when there is no fresh block": {
metas: []dirMeta{
metaRange("1", 0, 540, &BlockStats{
NumSeries: 10,
NumTombstones: 3,
}),
},
expected: nil,
},
"Select large blocks that have many tombstones when fresh appears": {
metas: []dirMeta{
metaRange("1", 0, 540, &BlockStats{
NumSeries: 10,
NumTombstones: 3,
}),
metaRange("2", 540, 560, nil),
},
expected: []string{"1"},
},
"For small blocks, do not compact tombstones, even when fresh appears.": {
metas: []dirMeta{
metaRange("1", 0, 60, &BlockStats{
NumSeries: 10,
NumTombstones: 3,
}),
metaRange("2", 60, 80, nil),
},
expected: nil,
},
`Regression test: we were stuck in a compact loop where we always recompacted
the same block when tombstones and series counts were zero`: {
metas: []dirMeta{
metaRange("1", 0, 540, &BlockStats{
NumSeries: 0,
NumTombstones: 0,
}),
metaRange("2", 540, 560, nil),
},
expected: nil,
},
`Regression test: we were wrongly assuming that a block is fresh from the WAL when its ULID is the newest.
We need to actually look at max time instead.
With the previous, wrong approach block "8" was ignored, so we were wrongly compacting 5 and 7 and introducing
block overlaps`: {
metas: []dirMeta{
metaRange("5", 0, 360, nil),
metaRange("6", 540, 560, nil), // Fresh one.
metaRange("7", 360, 420, nil),
metaRange("8", 420, 540, nil),
},
expected: []string{"7", "8"},
},
// |--------------|
// |----------------|
// |--------------|
"Overlapping blocks 1": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 19, 40, nil),
metaRange("3", 40, 60, nil),
},
expected: []string{"1", "2"},
},
// |--------------|
// |--------------|
// |--------------|
"Overlapping blocks 2": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 30, 50, nil),
},
expected: []string{"2", "3"},
},
// |--------------|
// |---------------------|
// |--------------|
"Overlapping blocks 3": {
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 10, 40, nil),
metaRange("3", 30, 50, nil),
},
expected: []string{"1", "2", "3"},
},
// |--------------|
// |--------------------------------|
// |--------------|
// |--------------|
"Overlapping blocks 4": {
metas: []dirMeta{
metaRange("5", 0, 360, nil),
metaRange("6", 340, 560, nil),
metaRange("7", 360, 420, nil),
metaRange("8", 420, 540, nil),
},
expected: []string{"5", "6", "7", "8"},
},
// |--------------|
// |--------------|
// |--------------|
// |--------------|
"Overlapping blocks 5": {
metas: []dirMeta{
metaRange("1", 0, 10, nil),
metaRange("2", 9, 20, nil),
metaRange("3", 30, 40, nil),
metaRange("4", 39, 50, nil),
},
expected: []string{"1", "2"},
},
}
for title, c := range cases {
if !t.Run(title, func(t *testing.T) {
res, err := compactor.plan(c.metas)
require.NoError(t, err)
require.Equal(t, c.expected, res)
}) {
return
}
}
}
func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
20,
60,
240,
720,
2160,
}, nil, nil)
require.NoError(t, err)
cases := []struct {
metas []dirMeta
}{
{
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 40, 60, nil),
metaRange("4", 60, 80, nil),
},
},
{
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 60, 80, nil),
metaRange("4", 80, 100, nil),
},
},
{
metas: []dirMeta{
metaRange("1", 0, 20, nil),
metaRange("2", 20, 40, nil),
metaRange("3", 40, 60, nil),
metaRange("4", 60, 120, nil),
metaRange("5", 120, 180, nil),
metaRange("6", 180, 200, nil),
},
},
}
for _, c := range cases {
c.metas[1].meta.Compaction.Failed = true
res, err := compactor.plan(c.metas)
require.NoError(t, err)
require.Equal(t, []string(nil), res)
}
}
func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{
20,
60,
240,
720,
2160,
}, nil, nil)
require.NoError(t, err)
tmpdir := t.TempDir()
require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultBlockPopulator{}, erringBReader{}))
_, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix)
require.True(t, os.IsNotExist(err), "directory is not cleaned up")
}
func metaRange(name string, mint, maxt int64, stats *BlockStats) dirMeta {
meta := &BlockMeta{MinTime: mint, MaxTime: maxt}
if stats != nil {
meta.Stats = *stats
}
return dirMeta{
dir: name,
meta: meta,
}
}
type erringBReader struct{}
func (erringBReader) Index() (IndexReader, error) { return nil, errors.New("index") }
func (erringBReader) Chunks() (ChunkReader, error) { return nil, errors.New("chunks") }
func (erringBReader) Tombstones() (tombstones.Reader, error) { return nil, errors.New("tombstones") }
func (erringBReader) Meta() BlockMeta { return BlockMeta{} }
func (erringBReader) Size() int64 { return 0 }
type nopChunkWriter struct{}
func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil }
func (nopChunkWriter) Close() error { return nil }
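// samplesForRange generates one sample per timestamp in [minTime, maxTime],
// split into chunks of at most maxSamplesPerChunk samples each.
// For example, samplesForRange(0, 5, 3) yields two chunks with
// timestamps {0, 1, 2} and {3, 4, 5}.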
func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) {
var curr []sample
for i := minTime; i <= maxTime; i++ {
curr = append(curr, sample{t: i})
if len(curr) >= maxSamplesPerChunk {
ret = append(ret, curr)
curr = []sample{}
}
}
if len(curr) > 0 {
ret = append(ret, curr)
}
return ret
}
func TestCompaction_populateBlock(t *testing.T) {
for _, tc := range []struct {
title string
inputSeriesSamples [][]seriesSamples
compactMinTime int64
compactMaxTime int64 // When not defined the test runner sets a default of math.MaxInt64.
irPostingsFunc IndexReaderPostingsFunc
expSeriesSamples []seriesSamples
expErr error
}{
{
title: "Populate block from empty input should return error.",
inputSeriesSamples: [][]seriesSamples{},
expErr: errors.New("cannot populate block from no readers"),
},
{
// Populate from a single block without chunks. We expect this kind of series to be ignored.
inputSeriesSamples: [][]seriesSamples{
{{lset: map[string]string{"a": "b"}}},
},
},
{
title: "Populate from single block. We expect the same samples at the output.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
{
title: "Populate from two blocks.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}},
},
{
// no-chunk series should be dropped.
lset: map[string]string{"a": "empty"},
},
},
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 21}, {t: 30}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 40}, {t: 45}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
},
},
},
{
title: "Populate from two blocks; chunks with negative time.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: -11}, {t: -9}}, {{t: 10}, {t: 19}}},
},
{
// no-chunk series should be dropped.
lset: map[string]string{"a": "empty"},
},
},
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 21}, {t: 30}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 40}, {t: 45}}},
},
},
},
compactMinTime: -11,
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: -11}, {t: -9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
},
},
},
{
title: "Populate from two blocks showing that order is maintained.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}},
},
},
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 21}, {t: 30}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 40}, {t: 45}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
},
},
},
{
title: "Populate from two blocks showing that order of series is sorted.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "4"},
chunks: [][]sample{{{t: 5}, {t: 7}}},
},
{
lset: map[string]string{"a": "3"},
chunks: [][]sample{{{t: 5}, {t: 6}}},
},
{
lset: map[string]string{"a": "same"},
chunks: [][]sample{{{t: 1}, {t: 4}}},
},
},
{
{
lset: map[string]string{"a": "2"},
chunks: [][]sample{{{t: 1}, {t: 3}}},
},
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{{{t: 1}, {t: 2}}},
},
{
lset: map[string]string{"a": "same"},
chunks: [][]sample{{{t: 5}, {t: 8}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{{{t: 1}, {t: 2}}},
},
{
lset: map[string]string{"a": "2"},
chunks: [][]sample{{{t: 1}, {t: 3}}},
},
{
lset: map[string]string{"a": "3"},
chunks: [][]sample{{{t: 5}, {t: 6}}},
},
{
lset: map[string]string{"a": "4"},
chunks: [][]sample{{{t: 5}, {t: 7}}},
},
{
lset: map[string]string{"a": "same"},
chunks: [][]sample{{{t: 1}, {t: 4}}, {{t: 5}, {t: 8}}},
},
},
},
{
title: "Populate from two blocks 1:1 duplicated chunks; with negative timestamps.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 3}, {t: 4}}},
},
{
lset: map[string]string{"a": "2"},
chunks: [][]sample{{{t: -3}, {t: -2}}, {{t: 1}, {t: 3}, {t: 4}}, {{t: 5}, {t: 6}}},
},
},
{
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{{{t: 3}, {t: 4}}},
},
{
lset: map[string]string{"a": "2"},
chunks: [][]sample{{{t: 1}, {t: 3}, {t: 4}}, {{t: 7}, {t: 8}}},
},
},
},
compactMinTime: -3,
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 3}, {t: 4}}},
},
{
lset: map[string]string{"a": "2"},
chunks: [][]sample{{{t: -3}, {t: -2}}, {{t: 1}, {t: 3}, {t: 4}}, {{t: 5}, {t: 6}}, {{t: 7}, {t: 8}}},
},
},
},
{
// This should not happen because the head block makes sure chunks do not cross block boundaries.
// We used to return an error, but now the chunk is trimmed.
title: "Populate from single block containing chunk outside of compact meta time range.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 30}}},
},
},
},
compactMinTime: 0,
compactMaxTime: 20,
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}}},
},
},
},
{
// Introduced by https://github.com/prometheus/tsdb/issues/347. We used to return an error, but now the chunk is trimmed.
title: "Populate from single block containing extra chunk",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "issue347"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
},
},
},
compactMinTime: 0,
compactMaxTime: 10,
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "issue347"},
chunks: [][]sample{{{t: 1}, {t: 2}}},
},
},
},
{
// Deduplication expected.
// Introduced by pull/370 and pull/539.
title: "Populate from two blocks containing duplicated chunk.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
},
},
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 10}, {t: 20}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
},
},
},
{
// Introduced by https://github.com/prometheus/tsdb/pull/539.
title: "Populate from three overlapping blocks.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "overlap-all"},
chunks: [][]sample{{{t: 19}, {t: 30}}},
},
{
lset: map[string]string{"a": "overlap-beginning"},
chunks: [][]sample{{{t: 0}, {t: 5}}},
},
{
lset: map[string]string{"a": "overlap-ending"},
chunks: [][]sample{{{t: 21}, {t: 30}}},
},
},
{
{
lset: map[string]string{"a": "overlap-all"},
chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "overlap-beginning"},
chunks: [][]sample{{{t: 0}, {t: 10}, {t: 12}, {t: 20}}},
},
{
lset: map[string]string{"a": "overlap-ending"},
chunks: [][]sample{{{t: 0}, {t: 10}, {t: 13}, {t: 20}}},
},
},
{
{
lset: map[string]string{"a": "overlap-all"},
chunks: [][]sample{{{t: 27}, {t: 35}}},
},
{
lset: map[string]string{"a": "overlap-ending"},
chunks: [][]sample{{{t: 27}, {t: 35}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "overlap-all"},
chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 19}, {t: 20}, {t: 27}, {t: 30}, {t: 35}}},
},
{
lset: map[string]string{"a": "overlap-beginning"},
chunks: [][]sample{{{t: 0}, {t: 5}, {t: 10}, {t: 12}, {t: 20}}},
},
{
lset: map[string]string{"a": "overlap-ending"},
chunks: [][]sample{{{t: 0}, {t: 10}, {t: 13}, {t: 20}}, {{t: 21}, {t: 27}, {t: 30}, {t: 35}}},
},
},
},
{
title: "Populate from three partially overlapping blocks with few full chunks.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "1", "b": "1"},
chunks: samplesForRange(0, 659, 120), // 5 and a half chunks.
},
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: samplesForRange(0, 659, 120),
},
},
{
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: samplesForRange(480, 1199, 120), // Two chunks overlapping with the previous block, two non-overlapping, and two overlapping with the next block.
},
{
lset: map[string]string{"a": "1", "b": "3"},
chunks: samplesForRange(480, 1199, 120),
},
},
{
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: samplesForRange(960, 1499, 120), // 5 and a half chunks.
},
{
lset: map[string]string{"a": "1", "b": "4"},
chunks: samplesForRange(960, 1499, 120),
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "1", "b": "1"},
chunks: samplesForRange(0, 659, 120),
},
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: samplesForRange(0, 1499, 120),
},
{
lset: map[string]string{"a": "1", "b": "3"},
chunks: samplesForRange(480, 1199, 120),
},
{
lset: map[string]string{"a": "1", "b": "4"},
chunks: samplesForRange(960, 1499, 120),
},
},
},
{
title: "Populate from three partially overlapping blocks with chunks that are expected to merge into single big chunks.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: [][]sample{{{t: 0}, {t: 6902464}}, {{t: 6961968}, {t: 7080976}}},
},
},
{
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: [][]sample{{{t: 3600000}, {t: 13953696}}, {{t: 14042952}, {t: 14221464}}},
},
},
{
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: [][]sample{{{t: 10800000}, {t: 14251232}}, {{t: 14280984}, {t: 14340488}}},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "1", "b": "2"},
chunks: [][]sample{{{t: 0}, {t: 3600000}, {t: 6902464}, {t: 6961968}, {t: 7080976}, {t: 10800000}, {t: 13953696}, {t: 14042952}, {t: 14221464}, {t: 14251232}}, {{t: 14280984}, {t: 14340488}}},
},
},
},
{
// Regression test for populateWithDelChunkSeriesIterator failing to set minTime on chunks.
title: "Populate from mixed type series and expect sample inside the interval only.",
compactMinTime: 1,
compactMaxTime: 11,
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{
{{t: 0, h: tsdbutil.GenerateTestHistogram(0)}, {t: 1, h: tsdbutil.GenerateTestHistogram(1)}},
{{t: 10, f: 1}, {t: 11, f: 2}},
},
},
},
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "1"},
chunks: [][]sample{
{{t: 1, h: tsdbutil.GenerateTestHistogram(1)}},
{{t: 10, f: 1}},
},
},
},
},
{
title: "Populate from single block with index reader postings function selecting different series. Expect empty block.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
p, err := reader.Postings(ctx, "a", "c")
if err != nil {
return index.EmptyPostings()
}
return reader.SortedPostings(p)
},
},
{
title: "Populate from single block with index reader postings function selecting one series. Expect partial block.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "d"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
p, err := reader.Postings(ctx, "a", "c", "d")
if err != nil {
return index.EmptyPostings()
}
return reader.SortedPostings(p)
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "d"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
} {
t.Run(tc.title, func(t *testing.T) {
blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples))
for _, b := range tc.inputSeriesSamples {
ir, cr, mint, maxt := createIdxChkReaders(t, b)
blocks = append(blocks, &mockBReader{ir: ir, cr: cr, mint: mint, maxt: maxt})
}
c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil, nil)
require.NoError(t, err)
meta := &BlockMeta{
MinTime: tc.compactMinTime,
MaxTime: tc.compactMaxTime,
}
if meta.MaxTime == 0 {
meta.MaxTime = math.MaxInt64
}
iw := &mockIndexWriter{}
blockPopulator := DefaultBlockPopulator{}
irPostingsFunc := AllSortedPostings
if tc.irPostingsFunc != nil {
irPostingsFunc = tc.irPostingsFunc
}
err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc)
if tc.expErr != nil {
require.EqualError(t, err, tc.expErr.Error())
return
}
require.NoError(t, err)
// Check if response is expected and chunk is valid.
var raw []seriesSamples
for _, s := range iw.seriesChunks {
ss := seriesSamples{lset: s.l.Map()}
var iter chunkenc.Iterator
for _, chk := range s.chunks {
var (
samples = make([]sample, 0, chk.Chunk.NumSamples())
iter = chk.Chunk.Iterator(iter)
firstTs int64 = math.MaxInt64
s sample
)
for vt := iter.Next(); vt != chunkenc.ValNone; vt = iter.Next() {
switch vt {
case chunkenc.ValFloat:
s.t, s.f = iter.At()
samples = append(samples, s)
case chunkenc.ValHistogram:
s.t, s.h = iter.AtHistogram(nil)
samples = append(samples, s)
case chunkenc.ValFloatHistogram:
s.t, s.fh = iter.AtFloatHistogram(nil)
samples = append(samples, s)
default:
require.Fail(t, "unexpected value type")
}
if firstTs == math.MaxInt64 {
firstTs = s.t
}
}
// Check if chunk has correct min, max times.
require.Equal(t, firstTs, chk.MinTime, "chunk Meta %v does not match the first encoded sample timestamp: %v", chk, firstTs)
require.Equal(t, s.t, chk.MaxTime, "chunk Meta %v does not match the last encoded sample timestamp %v", chk, s.t)
require.NoError(t, iter.Err())
ss.chunks = append(ss.chunks, samples)
}
raw = append(raw, ss)
}
require.Equal(t, tc.expSeriesSamples, raw)
// Check if stats are calculated properly.
s := BlockStats{NumSeries: uint64(len(tc.expSeriesSamples))}
for _, series := range tc.expSeriesSamples {
s.NumChunks += uint64(len(series.chunks))
for _, chk := range series.chunks {
s.NumSamples += uint64(len(chk))
for _, smpl := range chk {
if smpl.h != nil || smpl.fh != nil {
s.NumHistogramSamples++
} else {
s.NumFloatSamples++
}
}
}
}
require.Equal(t, s, meta.Stats)
})
}
}
func BenchmarkCompaction(b *testing.B) {
cases := []struct {
ranges [][2]int64
compactionType string
}{
{
ranges: [][2]int64{{0, 100}, {200, 300}, {400, 500}, {600, 700}},
compactionType: "normal",
},
{
ranges: [][2]int64{{0, 1000}, {2000, 3000}, {4000, 5000}, {6000, 7000}},
compactionType: "normal",
},
{
ranges: [][2]int64{{0, 2000}, {3000, 5000}, {6000, 8000}, {9000, 11000}},
compactionType: "normal",
},
{
ranges: [][2]int64{{0, 5000}, {6000, 11000}, {12000, 17000}, {18000, 23000}},
compactionType: "normal",
},
// 40% overlaps.
{
ranges: [][2]int64{{0, 100}, {60, 160}, {120, 220}, {180, 280}},
compactionType: "vertical",
},
{
ranges: [][2]int64{{0, 1000}, {600, 1600}, {1200, 2200}, {1800, 2800}},
compactionType: "vertical",
},
{
ranges: [][2]int64{{0, 2000}, {1200, 3200}, {2400, 4400}, {3600, 5600}},
compactionType: "vertical",
},
{
ranges: [][2]int64{{0, 5000}, {3000, 8000}, {6000, 11000}, {9000, 14000}},
compactionType: "vertical",
},
}
nSeries := 10000
for _, c := range cases {
nBlocks := len(c.ranges)
b.Run(fmt.Sprintf("type=%s,blocks=%d,series=%d,samplesPerSeriesPerBlock=%d", c.compactionType, nBlocks, nSeries, c.ranges[0][1]-c.ranges[0][0]+1), func(b *testing.B) {
dir := b.TempDir()
blockDirs := make([]string, 0, len(c.ranges))
var blocks []*Block
for _, r := range c.ranges {
block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil, nil)
require.NoError(b, err)
blocks = append(blocks, block)
defer func() {
require.NoError(b, block.Close())
}()
blockDirs = append(blockDirs, block.Dir())
}
c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil)
require.NoError(b, err)
b.ResetTimer()
b.ReportAllocs()
for b.Loop() {
_, err = c.Compact(dir, blockDirs, blocks)
require.NoError(b, err)
}
})
}
}
func BenchmarkCompactionFromHead(b *testing.B) {
dir := b.TempDir()
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/repair_test.go | tsdb/repair_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/testutil"
)
func TestRepairBadIndexVersion(t *testing.T) {
tmpDir := t.TempDir()
ctx := context.Background()
// The broken index used in this test was written by the following script
// at a broken revision.
//
// func main() {
// w, err := index.NewWriter(indexFilename)
// if err != nil {
// panic(err)
// }
// err = w.AddSymbols(map[string]struct{}{
// "a": struct{}{},
// "b": struct{}{},
// "1": struct{}{},
// "2": struct{}{},
// })
// if err != nil {
// panic(err)
// }
// err = w.AddSeries(1, labels.FromStrings("a", "1", "b", "1"))
// if err != nil {
// panic(err)
// }
// err = w.AddSeries(2, labels.FromStrings("a", "2", "b", "1"))
// if err != nil {
// panic(err)
// }
// err = w.WritePostings("b", "1", index.NewListPostings([]uint64{1, 2}))
// if err != nil {
// panic(err)
// }
// if err := w.Close(); err != nil {
// panic(err)
// }
// }
tmpDbDir := filepath.Join(tmpDir, "01BZJ9WJQPWHGNC2W4J9TA62KC")
// Create a copy DB to run test against.
require.NoError(t, fileutil.CopyDirs(filepath.Join("testdata", "repair_index_version", "01BZJ9WJQPWHGNC2W4J9TA62KC"), tmpDbDir))
// Check the current db.
// In its current state, lookups should fail with the fixed code.
_, _, err := readMetaFile(tmpDbDir)
require.Error(t, err)
// Create an empty chunks dir in the block to imitate its presence.
require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0o777))
// Read current index to check integrity.
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), index.DecodePostingsRaw)
require.NoError(t, err)
p, err := r.Postings(ctx, "b", "1")
require.NoError(t, err)
var builder labels.ScratchBuilder
for p.Next() {
t.Logf("next ID %d", p.At())
require.Error(t, r.Series(p.At(), &builder, nil))
}
require.NoError(t, p.Err())
require.NoError(t, r.Close())
// On DB opening all blocks in the base dir should be repaired.
db, err := Open(tmpDir, nil, nil, nil, nil)
require.NoError(t, err)
db.Close()
r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), index.DecodePostingsRaw)
require.NoError(t, err)
defer r.Close()
p, err = r.Postings(ctx, "b", "1")
require.NoError(t, err)
res := []labels.Labels{}
for p.Next() {
t.Logf("next ID %d", p.At())
var chks []chunks.Meta
require.NoError(t, r.Series(p.At(), &builder, &chks))
res = append(res, builder.Labels())
}
require.NoError(t, p.Err())
testutil.RequireEqual(t, []labels.Labels{
labels.FromStrings("a", "1", "b", "1"),
labels.FromStrings("a", "2", "b", "1"),
}, res)
meta, _, err := readMetaFile(tmpDbDir)
require.NoError(t, err)
require.Equal(t, metaVersion1, meta.Version, "unexpected meta version %d", meta.Version)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/head_dedupelabels.go | tsdb/head_dedupelabels.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build dedupelabels
package tsdb
import (
"log/slog"
"github.com/prometheus/prometheus/model/labels"
)
// Helper method to access labels under lock.
func (s *memSeries) labels() labels.Labels {
s.Lock()
defer s.Unlock()
return s.lset
}
// RebuildSymbolTable goes through all the series in h, builds a SymbolTable with all names and values,
// and replaces each series' Labels with one using that SymbolTable.
func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable {
logger.Info("RebuildSymbolTable starting")
st := labels.NewSymbolTable()
builder := labels.NewScratchBuilderWithSymbolTable(st, 0)
rebuildLabels := func(lbls labels.Labels) labels.Labels {
builder.Reset()
lbls.Range(func(l labels.Label) {
builder.Add(l.Name, l.Value)
})
return builder.Labels()
}
for i := 0; i < h.series.size; i++ {
h.series.locks[i].Lock()
for _, s := range h.series.hashes[i].unique {
s.Lock()
s.lset = rebuildLabels(s.lset)
s.Unlock()
}
for _, all := range h.series.hashes[i].conflicts {
for _, s := range all {
s.Lock()
s.lset = rebuildLabels(s.lset)
s.Unlock()
}
}
h.series.locks[i].Unlock()
}
type withReset interface{ ResetSymbolTable(*labels.SymbolTable) }
if e, ok := h.exemplars.(withReset); ok {
e.ResetSymbolTable(st)
}
logger.Info("RebuildSymbolTable finished", "size", st.Len())
return st
}
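// ResetSymbolTable rebuilds all series and exemplar labels held by the storage
// using the given SymbolTable, so they share symbols with the rest of the Head.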
func (ce *CircularExemplarStorage) ResetSymbolTable(st *labels.SymbolTable) {
builder := labels.NewScratchBuilderWithSymbolTable(st, 0)
rebuildLabels := func(lbls labels.Labels) labels.Labels {
builder.Reset()
lbls.Range(func(l labels.Label) {
builder.Add(l.Name, l.Value)
})
return builder.Labels()
}
ce.lock.RLock()
defer ce.lock.RUnlock()
for _, v := range ce.index {
v.seriesLabels = rebuildLabels(v.seriesLabels)
}
for i := range ce.exemplars {
if ce.exemplars[i].ref == nil {
continue
}
ce.exemplars[i].exemplar.Labels = rebuildLabels(ce.exemplars[i].exemplar.Labels)
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/exemplar_test.go | tsdb/exemplar_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"fmt"
"math"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
)
var eMetrics = NewExemplarMetrics(prometheus.DefaultRegisterer)
// Tests the same exemplar cases as TestAddExemplar, but specifically exercises the ValidateExemplar function so it can be relied on externally.
func TestValidateExemplar(t *testing.T) {
exs, err := NewCircularExemplarStorage(2, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
l := labels.FromStrings("service", "asdf")
e := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "qwerty"),
Value: 0.1,
Ts: 1,
}
require.NoError(t, es.ValidateExemplar(l, e))
require.NoError(t, es.AddExemplar(l, e))
e2 := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "zxcvb"),
Value: 0.1,
Ts: 2,
}
require.NoError(t, es.ValidateExemplar(l, e2))
require.NoError(t, es.AddExemplar(l, e2))
require.Equal(t, storage.ErrDuplicateExemplar, es.ValidateExemplar(l, e2), "error is expected when attempting to validate a duplicate exemplar")
e3 := e2
e3.Ts = 3
require.Equal(t, storage.ErrDuplicateExemplar, es.ValidateExemplar(l, e3), "error is expected when attempting to validate a duplicate exemplar, even with a different timestamp")
e3.Ts = 1
e3.Value = 0.3
require.Equal(t, storage.ErrOutOfOrderExemplar, es.ValidateExemplar(l, e3))
e4 := exemplar.Exemplar{
Labels: labels.FromStrings("a", strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength)),
Value: 0.1,
Ts: 2,
}
require.Equal(t, storage.ErrExemplarLabelLength, es.ValidateExemplar(l, e4))
}
func TestAddExemplar(t *testing.T) {
exs, err := NewCircularExemplarStorage(2, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
l := labels.FromStrings("service", "asdf")
e := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "qwerty"),
Value: 0.1,
Ts: 1,
}
require.NoError(t, es.AddExemplar(l, e))
require.Equal(t, 0, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly")
e2 := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "zxcvb"),
Value: 0.1,
Ts: 2,
}
require.NoError(t, es.AddExemplar(l, e2))
require.Equal(t, 1, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
require.True(t, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar)
require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar")
e3 := e2
e3.Ts = 3
require.NoError(t, es.AddExemplar(l, e3), "no error is expected when attempting to add duplicate exemplar, even with different timestamp")
e3.Ts = 1
e3.Value = 0.3
require.Equal(t, storage.ErrOutOfOrderExemplar, es.AddExemplar(l, e3))
e4 := exemplar.Exemplar{
Labels: labels.FromStrings("a", strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength)),
Value: 0.1,
Ts: 2,
}
require.Equal(t, storage.ErrExemplarLabelLength, es.AddExemplar(l, e4))
}
func TestStorageOverflow(t *testing.T) {
// Test that circular buffer index and assignment
// works properly, adding more exemplars than can
// be stored and then querying for them.
exs, err := NewCircularExemplarStorage(5, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
lName, lValue := "service", "asdf"
l := labels.FromStrings(lName, lValue)
var eList []exemplar.Exemplar
for i := 0; i < len(es.exemplars)+1; i++ {
e := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "a"),
Value: float64(i+1) / 10,
Ts: int64(101 + i),
}
es.AddExemplar(l, e)
eList = append(eList, e)
}
require.True(t, es.exemplars[0].exemplar.Ts == 106, "exemplar was not stored correctly")
m, err := labels.NewMatcher(labels.MatchEqual, lName, lValue)
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(100, 110, []*labels.Matcher{m})
require.NoError(t, err)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.True(t, reflect.DeepEqual(eList[1:], ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", eList[1:], ret[0].Exemplars)
}
func TestSelectExemplar(t *testing.T) {
exs, err := NewCircularExemplarStorage(5, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
lName, lValue := "service", "asdf"
l := labels.FromStrings(lName, lValue)
e := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "querty"),
Value: 0.1,
Ts: 12,
}
err = es.AddExemplar(l, e)
require.NoError(t, err, "adding exemplar failed")
require.True(t, reflect.DeepEqual(es.exemplars[0].exemplar, e), "exemplar was not stored correctly")
m, err := labels.NewMatcher(labels.MatchEqual, lName, lValue)
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(0, 100, []*labels.Matcher{m})
require.NoError(t, err)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
expectedResult := []exemplar.Exemplar{e}
require.True(t, reflect.DeepEqual(expectedResult, ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", expectedResult, ret[0].Exemplars)
}
func TestSelectExemplar_MultiSeries(t *testing.T) {
exs, err := NewCircularExemplarStorage(5, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
l1Name := "test_metric"
l1 := labels.FromStrings(labels.MetricName, l1Name, "service", "asdf")
l2Name := "test_metric2"
l2 := labels.FromStrings(labels.MetricName, l2Name, "service", "qwer")
for i := 0; i < len(es.exemplars); i++ {
e1 := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "a"),
Value: float64(i+1) / 10,
Ts: int64(101 + i),
}
err = es.AddExemplar(l1, e1)
require.NoError(t, err)
e2 := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "b"),
Value: float64(i+1) / 10,
Ts: int64(101 + i),
}
err = es.AddExemplar(l2, e2)
require.NoError(t, err)
}
m, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, l2Name)
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(100, 200, []*labels.Matcher{m})
require.NoError(t, err)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.Len(t, ret[0].Exemplars, 3, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars))
m, err = labels.NewMatcher(labels.MatchEqual, labels.MetricName, l1Name)
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err = es.Select(100, 200, []*labels.Matcher{m})
require.NoError(t, err)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.Len(t, ret[0].Exemplars, 2, "didn't get expected 2 exemplars, got %d", len(ret[0].Exemplars))
}
func TestSelectExemplar_TimeRange(t *testing.T) {
var lenEs int64 = 5
exs, err := NewCircularExemplarStorage(lenEs, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
lName, lValue := "service", "asdf"
l := labels.FromStrings(lName, lValue)
for i := 0; int64(i) < lenEs; i++ {
err := es.AddExemplar(l, exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(i)),
Value: 0.1,
Ts: int64(101 + i),
})
require.NoError(t, err)
require.Equal(t, es.index[string(l.Bytes(nil))].newest, i, "exemplar was not stored correctly")
}
m, err := labels.NewMatcher(labels.MatchEqual, lName, lValue)
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(102, 104, []*labels.Matcher{m})
require.NoError(t, err)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.Len(t, ret[0].Exemplars, 3, "didn't get expected two exemplars %d, %+v", len(ret[0].Exemplars), ret)
}
// Test to ensure that even though a series matches more than one matcher from the
// query, its exemplars are only included in the result a single time.
func TestSelectExemplar_DuplicateSeries(t *testing.T) {
exs, err := NewCircularExemplarStorage(4, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
e := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "qwerty"),
Value: 0.1,
Ts: 12,
}
lName0, lValue0 := "service", "asdf"
lName1, lValue1 := "cluster", "us-central1"
l := labels.FromStrings(lName0, lValue0, lName1, lValue1)
// Let's just assume somehow the PromQL expression generated two separate lists of matchers,
// both of which can select this particular series.
m := [][]*labels.Matcher{
{
labels.MustNewMatcher(labels.MatchEqual, lName0, lValue0),
},
{
labels.MustNewMatcher(labels.MatchEqual, lName1, lValue1),
},
}
err = es.AddExemplar(l, e)
require.NoError(t, err, "adding exemplar failed")
require.True(t, reflect.DeepEqual(es.exemplars[0].exemplar, e), "exemplar was not stored correctly")
ret, err := es.Select(0, 100, m...)
require.NoError(t, err)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
}
func TestIndexOverwrite(t *testing.T) {
exs, err := NewCircularExemplarStorage(2, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
l1 := labels.FromStrings("service", "asdf")
l2 := labels.FromStrings("service", "qwer")
err = es.AddExemplar(l1, exemplar.Exemplar{Value: 1, Ts: 1})
require.NoError(t, err)
err = es.AddExemplar(l2, exemplar.Exemplar{Value: 2, Ts: 2})
require.NoError(t, err)
err = es.AddExemplar(l2, exemplar.Exemplar{Value: 3, Ts: 3})
require.NoError(t, err)
// Ensure index GC'ing is taking place, there should no longer be any
// index entry for series l1 since we just wrote two exemplars for series l2.
_, ok := es.index[string(l1.Bytes(nil))]
require.False(t, ok)
require.Equal(t, &indexEntry{1, 0, l2}, es.index[string(l2.Bytes(nil))])
err = es.AddExemplar(l1, exemplar.Exemplar{Value: 4, Ts: 4})
require.NoError(t, err)
i := es.index[string(l2.Bytes(nil))]
require.Equal(t, &indexEntry{0, 0, l2}, i)
}
func TestResize(t *testing.T) {
testCases := []struct {
name string
startSize int64
newCount int64
expectedSeries []int
notExpectedSeries []int
expectedMigrated int
}{
{
name: "Grow",
startSize: 100,
newCount: 200,
expectedSeries: []int{99, 98, 1, 0},
notExpectedSeries: []int{100},
expectedMigrated: 100,
},
{
name: "Shrink",
startSize: 100,
newCount: 50,
expectedSeries: []int{99, 98, 50},
notExpectedSeries: []int{49, 1, 0},
expectedMigrated: 50,
},
{
name: "ShrinkToZero",
startSize: 100,
newCount: 0,
expectedSeries: []int{},
notExpectedSeries: []int{},
expectedMigrated: 0,
},
{
name: "Negative",
startSize: 100,
newCount: -1,
expectedSeries: []int{},
notExpectedSeries: []int{},
expectedMigrated: 0,
},
{
name: "NegativeToNegative",
startSize: -1,
newCount: -2,
expectedSeries: []int{},
notExpectedSeries: []int{},
expectedMigrated: 0,
},
{
name: "GrowFromZero",
startSize: 0,
newCount: 10,
expectedSeries: []int{},
notExpectedSeries: []int{},
expectedMigrated: 0,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
for i := 0; int64(i) < tc.startSize; i++ {
err = es.AddExemplar(labels.FromStrings("service", strconv.Itoa(i)), exemplar.Exemplar{
Value: float64(i),
Ts: int64(i),
})
require.NoError(t, err)
}
resized := es.Resize(tc.newCount)
require.Equal(t, tc.expectedMigrated, resized)
q, err := es.Querier(context.TODO())
require.NoError(t, err)
matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "service", "")}
for _, expected := range tc.expectedSeries {
matchers[0].Value = strconv.Itoa(expected)
ex, err := q.Select(0, math.MaxInt64, matchers)
require.NoError(t, err)
require.NotEmpty(t, ex)
}
for _, notExpected := range tc.notExpectedSeries {
matchers[0].Value = strconv.Itoa(notExpected)
ex, err := q.Select(0, math.MaxInt64, matchers)
require.NoError(t, err)
require.Empty(t, ex)
}
})
}
}
func BenchmarkAddExemplar(b *testing.B) {
// We need to include these labels since we do length calculation
// before adding.
exLabels := labels.FromStrings("trace_id", "89620921")
for _, capacity := range []int{1000, 10000, 100000} {
for _, n := range []int{10000, 100000, 1000000} {
b.Run(fmt.Sprintf("%d/%d", n, capacity), func(b *testing.B) {
for b.Loop() {
b.StopTimer()
exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics)
require.NoError(b, err)
es := exs.(*CircularExemplarStorage)
var l labels.Labels
b.StartTimer()
for i := range n {
if i%100 == 0 {
l = labels.FromStrings("service", strconv.Itoa(i))
}
err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels})
if err != nil {
require.NoError(b, err)
}
}
}
})
}
}
}
func BenchmarkResizeExemplars(b *testing.B) {
testCases := []struct {
name string
startSize int64
endSize int64
numExemplars int
}{
{
name: "grow",
startSize: 100000,
endSize: 200000,
numExemplars: 150000,
},
{
name: "shrink",
startSize: 100000,
endSize: 50000,
numExemplars: 100000,
},
{
name: "grow",
startSize: 1000000,
endSize: 2000000,
numExemplars: 1500000,
},
{
name: "shrink",
startSize: 1000000,
endSize: 500000,
numExemplars: 1000000,
},
}
for _, tc := range testCases {
b.Run(fmt.Sprintf("%s-%d-to-%d", tc.name, tc.startSize, tc.endSize), func(b *testing.B) {
for b.Loop() {
b.StopTimer()
exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
require.NoError(b, err)
es := exs.(*CircularExemplarStorage)
var l labels.Labels
for i := 0; i < int(float64(tc.startSize)*1.5); i++ {
if i%100 == 0 {
l = labels.FromStrings("service", strconv.Itoa(i))
}
err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i)})
if err != nil {
require.NoError(b, err)
}
}
b.StartTimer()
es.Resize(tc.endSize)
}
})
}
}
// TestCircularExemplarStorage_Concurrent_AddExemplar_Resize tries to provoke a data race between AddExemplar and Resize.
// Run with race detection enabled.
func TestCircularExemplarStorage_Concurrent_AddExemplar_Resize(t *testing.T) {
exs, err := NewCircularExemplarStorage(0, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
l := labels.FromStrings("service", "asdf")
e := exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", "qwerty"),
Value: 0.1,
Ts: 1,
}
var wg sync.WaitGroup
wg.Add(1)
t.Cleanup(wg.Wait)
started := make(chan struct{})
go func() {
defer wg.Done()
<-started
for range 100 {
require.NoError(t, es.AddExemplar(l, e))
}
}()
for i := range 100 {
es.Resize(int64(i + 1))
if i == 0 {
close(started)
}
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/encoding/encoding.go | tsdb/encoding/encoding.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"math"
"github.com/dennwc/varint"
)
var (
ErrInvalidSize = errors.New("invalid size")
ErrInvalidChecksum = errors.New("invalid checksum")
)
// Encbuf is a helper type to populate a byte slice with various types.
type Encbuf struct {
B []byte
C [binary.MaxVarintLen64]byte
}
func (e *Encbuf) Reset() { e.B = e.B[:0] }
func (e *Encbuf) Get() []byte { return e.B }
func (e *Encbuf) Len() int { return len(e.B) }
func (e *Encbuf) PutString(s string) { e.B = append(e.B, s...) }
func (e *Encbuf) PutByte(c byte) { e.B = append(e.B, c) }
func (e *Encbuf) PutBytes(b []byte) { e.B = append(e.B, b...) }
func (e *Encbuf) PutBE32int(x int) { e.PutBE32(uint32(x)) }
func (e *Encbuf) PutUvarint32(x uint32) { e.PutUvarint64(uint64(x)) }
func (e *Encbuf) PutBE64int64(x int64) { e.PutBE64(uint64(x)) }
func (e *Encbuf) PutUvarint(x int) { e.PutUvarint64(uint64(x)) }
func (e *Encbuf) PutBE32(x uint32) {
binary.BigEndian.PutUint32(e.C[:], x)
e.B = append(e.B, e.C[:4]...)
}
func (e *Encbuf) PutBE64(x uint64) {
binary.BigEndian.PutUint64(e.C[:], x)
e.B = append(e.B, e.C[:8]...)
}
func (e *Encbuf) PutBEFloat64(x float64) {
e.PutBE64(math.Float64bits(x))
}
func (e *Encbuf) PutUvarint64(x uint64) {
n := binary.PutUvarint(e.C[:], x)
e.B = append(e.B, e.C[:n]...)
}
func (e *Encbuf) PutVarint64(x int64) {
n := binary.PutVarint(e.C[:], x)
e.B = append(e.B, e.C[:n]...)
}
// PutUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!).
func (e *Encbuf) PutUvarintStr(s string) {
e.PutUvarint(len(s))
e.PutString(s)
}
// PutUvarintBytes writes a variable length byte buffer.
func (e *Encbuf) PutUvarintBytes(b []byte) {
e.PutUvarint(len(b))
e.PutBytes(b)
}
// PutHash appends a hash over the buffer's current contents to the buffer.
func (e *Encbuf) PutHash(h hash.Hash) {
h.Reset()
e.WriteToHash(h)
e.PutHashSum(h)
}
// WriteToHash writes the current buffer contents to the given hash.
func (e *Encbuf) WriteToHash(h hash.Hash) {
_, err := h.Write(e.B)
if err != nil {
panic(err) // The CRC32 implementation does not error
}
}
// PutHashSum writes the Sum of the given hash to the buffer.
func (e *Encbuf) PutHashSum(h hash.Hash) {
e.B = h.Sum(e.B)
}
// Decbuf provides safe methods to extract data from a byte slice. It does all
// necessary bounds checking and advancing of the byte slice.
// Several values can be extracted without checking for errors. However, before using
// any value, the Err() method must be checked.
type Decbuf struct {
B []byte
E error
}
// NewDecbufAt returns a new decoding buffer. It expects the first 4 bytes
// after offset to hold the big endian encoded content length, followed by the contents and the expected
// checksum.
func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
if bs.Len() < off+4 {
return Decbuf{E: ErrInvalidSize}
}
b := bs.Range(off, off+4)
l := int(binary.BigEndian.Uint32(b))
if bs.Len() < off+4+l+4 {
return Decbuf{E: ErrInvalidSize}
}
// Load bytes holding the contents plus a CRC32 checksum.
b = bs.Range(off+4, off+4+l+4)
dec := Decbuf{B: b[:len(b)-4]}
if castagnoliTable != nil {
if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp {
return Decbuf{E: ErrInvalidChecksum}
}
}
return dec
}
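// Illustratively, the framing consumed by NewDecbufAt is (a sketch derived
// from the bounds checks above, not a separate format specification):
//
//	offset: length <4 bytes, big-endian> | contents <length bytes> | CRC32 <4 bytes>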
// NewDecbufUvarintAt returns a new decoding buffer. It expects the first bytes
// after offset to hold the uvarint-encoded buffer's length, followed by the contents and the expected
// checksum.
func NewDecbufUvarintAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
// We never have to access this method at the far end of the byte slice. Thus just checking
// against the MaxVarintLen32 is sufficient.
if bs.Len() < off+binary.MaxVarintLen32 {
return Decbuf{E: ErrInvalidSize}
}
b := bs.Range(off, off+binary.MaxVarintLen32)
l, n := varint.Uvarint(b)
if n <= 0 || n > binary.MaxVarintLen32 {
return Decbuf{E: fmt.Errorf("invalid uvarint %d", n)}
}
if bs.Len() < off+n+int(l)+4 {
return Decbuf{E: ErrInvalidSize}
}
// Load bytes holding the contents plus a CRC32 checksum.
b = bs.Range(off+n, off+n+int(l)+4)
dec := Decbuf{B: b[:len(b)-4]}
if dec.Crc32(castagnoliTable) != binary.BigEndian.Uint32(b[len(b)-4:]) {
return Decbuf{E: ErrInvalidChecksum}
}
return dec
}
// NewDecbufRaw returns a new decoding buffer of the given length.
func NewDecbufRaw(bs ByteSlice, length int) Decbuf {
if bs.Len() < length {
return Decbuf{E: ErrInvalidSize}
}
return Decbuf{B: bs.Range(0, length)}
}
func (d *Decbuf) Uvarint() int { return int(d.Uvarint64()) }
func (d *Decbuf) Uvarint32() uint32 { return uint32(d.Uvarint64()) }
func (d *Decbuf) Be32int() int { return int(d.Be32()) }
func (d *Decbuf) Be64int64() int64 { return int64(d.Be64()) }
// Crc32 returns a CRC32 checksum over the remaining bytes.
func (d *Decbuf) Crc32(castagnoliTable *crc32.Table) uint32 {
return crc32.Checksum(d.B, castagnoliTable)
}
func (d *Decbuf) Skip(l int) {
if len(d.B) < l {
d.E = ErrInvalidSize
return
}
d.B = d.B[l:]
}
func (d *Decbuf) UvarintStr() string {
return string(d.UvarintBytes())
}
// UvarintBytes returns a pointer to internal data;
// the return value becomes invalid if the byte slice goes away.
// Compared to UvarintStr, this avoids allocations.
func (d *Decbuf) UvarintBytes() []byte {
l := d.Uvarint64()
if d.E != nil {
return []byte{}
}
if len(d.B) < int(l) {
d.E = ErrInvalidSize
return []byte{}
}
s := d.B[:l]
d.B = d.B[l:]
return s
}
func (d *Decbuf) Varint64() int64 {
if d.E != nil {
return 0
}
// Decode as unsigned first, since that's what the varint library implements.
ux, n := varint.Uvarint(d.B)
if n < 1 {
d.E = ErrInvalidSize
return 0
}
// Now decode "ZigZag encoding" https://developers.google.com/protocol-buffers/docs/encoding#signed_integers.
x := int64(ux >> 1)
if ux&1 != 0 {
x = ^x
}
d.B = d.B[n:]
return x
}
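// A short worked example of the ZigZag decoding above: the unsigned inputs
// 0, 1, 2, 3, 4 decode to the signed values 0, -1, 1, -2, 2. For ux = 3,
// ux>>1 == 1 and the low bit is set, so x = ^1 == -2.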
func (d *Decbuf) Uvarint64() uint64 {
if d.E != nil {
return 0
}
x, n := varint.Uvarint(d.B)
if n < 1 {
d.E = ErrInvalidSize
return 0
}
d.B = d.B[n:]
return x
}
func (d *Decbuf) Be64() uint64 {
if d.E != nil {
return 0
}
if len(d.B) < 8 {
d.E = ErrInvalidSize
return 0
}
x := binary.BigEndian.Uint64(d.B)
d.B = d.B[8:]
return x
}
func (d *Decbuf) Be64Float64() float64 {
return math.Float64frombits(d.Be64())
}
func (d *Decbuf) Be32() uint32 {
if d.E != nil {
return 0
}
if len(d.B) < 4 {
d.E = ErrInvalidSize
return 0
}
x := binary.BigEndian.Uint32(d.B)
d.B = d.B[4:]
return x
}
func (d *Decbuf) Byte() byte {
if d.E != nil {
return 0
}
if len(d.B) < 1 {
d.E = ErrInvalidSize
return 0
}
x := d.B[0]
d.B = d.B[1:]
return x
}
func (d *Decbuf) ConsumePadding() {
if d.E != nil {
return
}
for len(d.B) > 1 && d.B[0] == '\x00' {
d.B = d.B[1:]
}
if len(d.B) < 1 {
d.E = ErrInvalidSize
}
}
func (d *Decbuf) Err() error { return d.E }
func (d *Decbuf) Len() int { return len(d.B) }
func (d *Decbuf) Get() []byte { return d.B }
// ByteSlice abstracts a byte slice.
type ByteSlice interface {
Len() int
Range(start, end int) []byte
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/index/index.go | tsdb/index/index.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"bufio"
"context"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"io"
"math"
"os"
"path/filepath"
"slices"
"sort"
"unsafe"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
const (
// MagicIndex 4 bytes at the head of an index file.
MagicIndex = 0xBAAAD700
// HeaderLen represents the number of bytes reserved at the start of the index for the header.
HeaderLen = 5
// FormatV1 represents version 1 of index.
FormatV1 = 1
// FormatV2 represents version 2 of index.
FormatV2 = 2
// FormatV3 represents version 3 of index.
FormatV3 = 3
indexFilename = "index"
seriesByteAlign = 16
// checkContextEveryNIterations is used in some tight loops to check if the context is done.
checkContextEveryNIterations = 128
)
type indexWriterSeries struct {
labels labels.Labels
chunks []chunks.Meta // series file offset of chunks
}
type indexWriterSeriesSlice []*indexWriterSeries
func (s indexWriterSeriesSlice) Len() int { return len(s) }
func (s indexWriterSeriesSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s indexWriterSeriesSlice) Less(i, j int) bool {
return labels.Compare(s[i].labels, s[j].labels) < 0
}
type indexWriterStage uint8
const (
idxStageNone indexWriterStage = iota
idxStageSymbols
idxStageSeries
idxStageDone
)
func (s indexWriterStage) String() string {
switch s {
case idxStageNone:
return "none"
case idxStageSymbols:
return "symbols"
case idxStageSeries:
return "series"
case idxStageDone:
return "done"
}
return "<unknown>"
}
// ErrPostingsOffsetTableTooLarge is returned when the postings offset table length
// would exceed 4 bytes (table would exceed the 4GiB limit).
var ErrPostingsOffsetTableTooLarge = errors.New("length size exceeds 4 bytes")
// ErrIndexExceeds64GiB is returned when the index file would exceed the 64GiB limit.
var ErrIndexExceeds64GiB = errors.New("exceeding max size of 64GiB")
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// up front in init() instead.
var castagnoliTable *crc32.Table
func init() {
castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}
// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
return crc32.New(castagnoliTable)
}
type PostingsEncoder func(*encoding.Encbuf, []uint32) error
type PostingsDecoder func(encoding.Decbuf) (int, Postings, error)
// Writer implements the IndexWriter interface for the standard
// serialization format.
type Writer struct {
ctx context.Context
// For the main index file.
f *FileWriter
// Temporary file for postings.
fP *FileWriter
// Temporary file for posting offsets table.
fPO *FileWriter
cntPO uint64
toc TOC
stage indexWriterStage
postingsStart uint64 // Due to padding, can differ from TOC entry.
// Reusable memory.
buf1 encoding.Encbuf
buf2 encoding.Encbuf
numSymbols int
symbols *Symbols
symbolFile *fileutil.MmapFile
lastSymbol string
symbolCache map[string]uint32 // From symbol to index in table.
labelNames map[string]uint64 // Label names, and their usage.
// Hold last series to validate that clients insert new series in order.
lastSeries labels.Labels
lastSeriesRef storage.SeriesRef
// Hold last added chunk reference to make sure that chunks are ordered properly.
lastChunkRef chunks.ChunkRef
crc32 hash.Hash
Version int
postingsEncoder PostingsEncoder
}
// TOC represents the index Table Of Contents that states where each section of the index starts.
type TOC struct {
Symbols uint64
Series uint64
LabelIndices uint64
LabelIndicesTable uint64
Postings uint64
PostingsTable uint64
}
// NewTOCFromByteSlice returns a parsed TOC from the given index byte slice.
func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
if bs.Len() < indexTOCLen {
return nil, encoding.ErrInvalidSize
}
b := bs.Range(bs.Len()-indexTOCLen, bs.Len())
expCRC := binary.BigEndian.Uint32(b[len(b)-4:])
d := encoding.Decbuf{B: b[:len(b)-4]}
if d.Crc32(castagnoliTable) != expCRC {
return nil, fmt.Errorf("read TOC: %w", encoding.ErrInvalidChecksum)
}
toc := &TOC{
Symbols: d.Be64(),
Series: d.Be64(),
LabelIndices: d.Be64(),
LabelIndicesTable: d.Be64(),
Postings: d.Be64(),
PostingsTable: d.Be64(),
}
return toc, d.Err()
}
// NewWriterWithEncoder returns a new Writer to the given filename. It
// serializes data in format version 2. It uses the given encoder to encode each
// postings list.
func NewWriterWithEncoder(ctx context.Context, fn string, encoder PostingsEncoder) (*Writer, error) {
dir := filepath.Dir(fn)
df, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
defer df.Close() // Explicitly close the directory handle; needed on Windows.
if err := os.RemoveAll(fn); err != nil {
return nil, fmt.Errorf("remove any existing index at path: %w", err)
}
// Main index file we are building.
f, err := NewFileWriter(fn)
if err != nil {
return nil, err
}
// Temporary file for postings.
fP, err := NewFileWriter(fn + "_tmp_p")
if err != nil {
return nil, err
}
// Temporary file for posting offset table.
fPO, err := NewFileWriter(fn + "_tmp_po")
if err != nil {
return nil, err
}
if err := df.Sync(); err != nil {
return nil, fmt.Errorf("sync dir: %w", err)
}
iw := &Writer{
ctx: ctx,
f: f,
fP: fP,
fPO: fPO,
stage: idxStageNone,
// Reusable memory.
buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
symbolCache: make(map[string]uint32, 1<<16),
labelNames: make(map[string]uint64, 1<<8),
crc32: newCRC32(),
postingsEncoder: encoder,
}
if err := iw.writeMeta(); err != nil {
return nil, err
}
return iw, nil
}
// NewWriter creates a new index writer using the default encoder. See
// NewWriterWithEncoder.
func NewWriter(ctx context.Context, fn string) (*Writer, error) {
return NewWriterWithEncoder(ctx, fn, EncodePostingsRaw)
}
func (w *Writer) write(bufs ...[]byte) error {
return w.f.Write(bufs...)
}
func (w *Writer) writeAt(buf []byte, pos uint64) error {
return w.f.WriteAt(buf, pos)
}
func (w *Writer) addPadding(size int) error {
return w.f.AddPadding(size)
}
type FileWriter struct {
f *os.File
fbuf *bufio.Writer
pos uint64
name string
}
func NewFileWriter(name string) (*FileWriter, error) {
f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666)
if err != nil {
return nil, err
}
return &FileWriter{
f: f,
fbuf: bufio.NewWriterSize(f, 1<<22),
pos: 0,
name: name,
}, nil
}
func (fw *FileWriter) Pos() uint64 {
return fw.pos
}
func (fw *FileWriter) Write(bufs ...[]byte) error {
for _, b := range bufs {
n, err := fw.fbuf.Write(b)
fw.pos += uint64(n)
if err != nil {
return err
}
// For now the index file must not grow beyond 64GiB. Some of the fixed-sized
// offset references in v1 are only 4 bytes large.
// Once we move to compressed/varint representations in those areas, this limitation
// can be lifted.
if fw.pos > 16*math.MaxUint32 {
return fmt.Errorf("%q %w", fw.name, ErrIndexExceeds64GiB)
}
}
return nil
}
func (fw *FileWriter) Flush() error {
return fw.fbuf.Flush()
}
func (fw *FileWriter) WriteAt(buf []byte, pos uint64) error {
if err := fw.Flush(); err != nil {
return err
}
_, err := fw.f.WriteAt(buf, int64(pos))
return err
}
// AddPadding adds zero-byte padding until the file size is a multiple of the given size.
func (fw *FileWriter) AddPadding(size int) error {
p := fw.pos % uint64(size)
if p == 0 {
return nil
}
p = uint64(size) - p
if err := fw.Write(make([]byte, p)); err != nil {
return fmt.Errorf("add padding: %w", err)
}
return nil
}
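// Worked example: with fw.pos == 13 and size == 16, p == 13, so 16-13 == 3
// zero bytes are written and fw.pos advances to 16. When fw.pos is already a
// multiple of size, nothing is written.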
func (fw *FileWriter) Close() error {
if err := fw.Flush(); err != nil {
return err
}
if err := fw.f.Sync(); err != nil {
return err
}
return fw.f.Close()
}
func (fw *FileWriter) Remove() error {
return os.Remove(fw.name)
}
// ensureStage handles transitions between write stages and ensures that IndexWriter
// methods are called in an order valid for the implementation.
func (w *Writer) ensureStage(s indexWriterStage) error {
select {
case <-w.ctx.Done():
return w.ctx.Err()
default:
}
if w.stage == s {
return nil
}
if w.stage < s-1 {
// A stage has been skipped.
if err := w.ensureStage(s - 1); err != nil {
return err
}
}
if w.stage > s {
return fmt.Errorf("invalid stage %q, currently at %q", s, w.stage)
}
// Mark start of sections in table of contents.
switch s {
case idxStageSymbols:
w.toc.Symbols = w.f.pos
if err := w.startSymbols(); err != nil {
return err
}
case idxStageSeries:
if err := w.finishSymbols(); err != nil {
return err
}
w.toc.Series = w.f.pos
case idxStageDone:
w.toc.LabelIndices = w.f.pos
// LabelIndices generation depends on the posting offset
// table produced at this stage.
if err := w.writePostingsToTmpFiles(); err != nil {
return err
}
w.toc.Postings = w.f.pos
if err := w.writePostings(); err != nil {
return err
}
w.toc.LabelIndicesTable = w.f.pos
w.toc.PostingsTable = w.f.pos
if err := w.writePostingsOffsetTable(); err != nil {
return err
}
if err := w.writeTOC(); err != nil {
return err
}
}
w.stage = s
return nil
}
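// As a sketch of the resulting call order (typical usage, not enforced beyond
// the checks above): all AddSymbol calls come first, in sorted order, then all
// AddSeries calls, and finally Close, which drives the writer through
// idxStageDone. ensureStage fills in skipped stage transitions recursively.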
func (w *Writer) writeMeta() error {
w.buf1.Reset()
w.buf1.PutBE32(MagicIndex)
w.buf1.PutByte(FormatV2)
return w.write(w.buf1.Get())
}
// AddSeries adds a series, one at a time, along with its chunks.
func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...chunks.Meta) error {
if err := w.ensureStage(idxStageSeries); err != nil {
return err
}
if labels.Compare(lset, w.lastSeries) <= 0 {
return fmt.Errorf("out-of-order series added with label set %q, last label set %q", lset, w.lastSeries)
}
if ref < w.lastSeriesRef && !w.lastSeries.IsEmpty() {
return fmt.Errorf("series with reference greater than %d already added", ref)
}
lastChunkRef := w.lastChunkRef
lastMaxT := int64(0)
for ix, c := range chunks {
if c.Ref < lastChunkRef {
return fmt.Errorf("unsorted chunk reference: %d, previous: %d", c.Ref, lastChunkRef)
}
lastChunkRef = c.Ref
if ix > 0 && c.MinTime <= lastMaxT {
return fmt.Errorf("chunk minT %d is not higher than previous chunk maxT %d", c.MinTime, lastMaxT)
}
if c.MaxTime < c.MinTime {
return fmt.Errorf("chunk maxT %d is less than minT %d", c.MaxTime, c.MinTime)
}
lastMaxT = c.MaxTime
}
// We pad to 16-byte alignment to increase the addressable space available through
// 4-byte series references.
if err := w.addPadding(seriesByteAlign); err != nil {
return fmt.Errorf("failed to write padding bytes: %w", err)
}
if w.f.pos%seriesByteAlign != 0 {
return fmt.Errorf("series write not 16-byte aligned at %d", w.f.pos)
}
w.buf2.Reset()
w.buf2.PutUvarint(lset.Len())
if err := lset.Validate(func(l labels.Label) error {
nameIndex, ok := w.symbolCache[l.Name]
if !ok {
return fmt.Errorf("symbol entry for %q does not exist", l.Name)
}
w.labelNames[l.Name]++
w.buf2.PutUvarint32(nameIndex)
valueIndex, ok := w.symbolCache[l.Value]
if !ok {
return fmt.Errorf("symbol entry for %q does not exist", l.Value)
}
w.buf2.PutUvarint32(valueIndex)
return nil
}); err != nil {
return err
}
w.buf2.PutUvarint(len(chunks))
if len(chunks) > 0 {
c := chunks[0]
w.buf2.PutVarint64(c.MinTime)
w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime))
w.buf2.PutUvarint64(uint64(c.Ref))
t0 := c.MaxTime
ref0 := int64(c.Ref)
for _, c := range chunks[1:] {
w.buf2.PutUvarint64(uint64(c.MinTime - t0))
w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime))
t0 = c.MaxTime
w.buf2.PutVarint64(int64(c.Ref) - ref0)
ref0 = int64(c.Ref)
}
}
w.buf1.Reset()
w.buf1.PutUvarint(w.buf2.Len())
w.buf2.PutHash(w.crc32)
if err := w.write(w.buf1.Get(), w.buf2.Get()); err != nil {
return fmt.Errorf("write series data: %w", err)
}
w.lastSeries.CopyFrom(lset)
w.lastSeriesRef = ref
w.lastChunkRef = lastChunkRef
return nil
}
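// A worked example of the chunk meta encoding above, with illustrative
// values: for chunks (MinTime=10, MaxTime=20, Ref=100) and
// (MinTime=25, MaxTime=30, Ref=164), the first chunk is written as
// varint(10), uvarint(20-10), uvarint(100); the second as the deltas
// uvarint(25-20), uvarint(30-25), varint(164-100).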
func (w *Writer) startSymbols() error {
// We are at w.toc.Symbols.
// Leave 4 bytes of space for the length, and another 4 for the number of symbols
// which will both be calculated later.
return w.write([]byte("alenblen"))
}
func (w *Writer) AddSymbol(sym string) error {
if err := w.ensureStage(idxStageSymbols); err != nil {
return err
}
if w.numSymbols != 0 && sym <= w.lastSymbol {
return fmt.Errorf("symbol %q out-of-order", sym)
}
w.lastSymbol = sym
w.symbolCache[sym] = uint32(w.numSymbols)
w.numSymbols++
w.buf1.Reset()
w.buf1.PutUvarintStr(sym)
return w.write(w.buf1.Get())
}
func (w *Writer) finishSymbols() error {
symbolTableSize := w.f.pos - w.toc.Symbols - 4
// The symbol table's <len> part is 4 bytes, so the total symbol table size must be less than or equal to 2^32-1.
if symbolTableSize > math.MaxUint32 {
return fmt.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize)
}
// Write out the length and symbol count.
w.buf1.Reset()
w.buf1.PutBE32int(int(symbolTableSize))
w.buf1.PutBE32int(w.numSymbols)
if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil {
return err
}
hashPos := w.f.pos
// Leave space for the hash. We can only calculate it
// now that the number of symbols is known, so mmap and do it from there.
if err := w.write([]byte("hash")); err != nil {
return err
}
if err := w.f.Flush(); err != nil {
return err
}
sf, err := fileutil.OpenMmapFile(w.f.name)
if err != nil {
return err
}
w.symbolFile = sf
hash := crc32.Checksum(w.symbolFile.Bytes()[w.toc.Symbols+4:hashPos], castagnoliTable)
w.buf1.Reset()
w.buf1.PutBE32(hash)
if err := w.writeAt(w.buf1.Get(), hashPos); err != nil {
return err
}
// Load in the symbol table efficiently for the rest of the index writing.
w.symbols, err = NewSymbols(realByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols))
if err != nil {
return fmt.Errorf("read symbols: %w", err)
}
return nil
}
// writePostingsOffsetTable writes the postings offset table.
func (w *Writer) writePostingsOffsetTable() error {
// Ensure everything is in the temporary file.
if err := w.fPO.Flush(); err != nil {
return err
}
startPos := w.f.pos
// Leave 4 bytes of space for the length, which will be calculated later.
if err := w.write([]byte("alen")); err != nil {
return err
}
// Copy over the tmp posting offset table; the offsets need to be
// adjusted as we go.
adjustment := w.postingsStart
w.buf1.Reset()
w.crc32.Reset()
w.buf1.PutBE32int(int(w.cntPO)) // Count.
w.buf1.WriteToHash(w.crc32)
if err := w.write(w.buf1.Get()); err != nil {
return err
}
f, err := fileutil.OpenMmapFile(w.fPO.name)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
}
}()
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
cnt := w.cntPO
for d.Err() == nil && cnt > 0 {
w.buf1.Reset()
w.buf1.PutUvarint(d.Uvarint()) // Keycount.
w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label name.
w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label value.
w.buf1.PutUvarint64(d.Uvarint64() + adjustment) // Offset.
w.buf1.WriteToHash(w.crc32)
if err := w.write(w.buf1.Get()); err != nil {
return err
}
cnt--
}
if d.Err() != nil {
return d.Err()
}
// Cleanup temporary file.
if err := f.Close(); err != nil {
return err
}
f = nil
if err := w.fPO.Close(); err != nil {
return err
}
if err := w.fPO.Remove(); err != nil {
return err
}
w.fPO = nil
err = w.writeLengthAndHash(startPos)
if err != nil {
return fmt.Errorf("postings offset table length/crc32 write error: %w", err)
}
return nil
}
func (w *Writer) writeLengthAndHash(startPos uint64) error {
w.buf1.Reset()
l := w.f.pos - startPos - 4
if l > math.MaxUint32 {
return fmt.Errorf("%w: %d", ErrPostingsOffsetTableTooLarge, l)
}
w.buf1.PutBE32int(int(l))
if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
return fmt.Errorf("write length from buffer error: %w", err)
}
// Write out the hash.
w.buf1.Reset()
w.buf1.PutHashSum(w.crc32)
if err := w.write(w.buf1.Get()); err != nil {
return fmt.Errorf("write buffer's crc32 error: %w", err)
}
return nil
}
const indexTOCLen = 6*8 + crc32.Size
func (w *Writer) writeTOC() error {
w.buf1.Reset()
w.buf1.PutBE64(w.toc.Symbols)
w.buf1.PutBE64(w.toc.Series)
w.buf1.PutBE64(w.toc.LabelIndices)
w.buf1.PutBE64(w.toc.LabelIndicesTable)
w.buf1.PutBE64(w.toc.Postings)
w.buf1.PutBE64(w.toc.PostingsTable)
w.buf1.PutHash(w.crc32)
return w.write(w.buf1.Get())
}
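// For reference, the TOC written above is a fixed-size trailer at the very
// end of the index file: six big-endian uint64 section offsets followed by a
// 4-byte CRC32, i.e. indexTOCLen == 6*8 + 4 == 52 bytes. NewTOCFromByteSlice
// reads it back from bs.Len()-indexTOCLen.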
func (w *Writer) writePostingsToTmpFiles() error {
names := make([]string, 0, len(w.labelNames))
for n := range w.labelNames {
names = append(names, n)
}
slices.Sort(names)
if err := w.f.Flush(); err != nil {
return err
}
f, err := fileutil.OpenMmapFile(w.f.name)
if err != nil {
return err
}
defer f.Close()
// Write out the special all-postings list.
offsets := []uint32{}
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))
d.Skip(int(w.toc.Series))
for d.Len() > 0 {
d.ConsumePadding()
startPos := w.toc.LabelIndices - uint64(d.Len())
if startPos%seriesByteAlign != 0 {
return fmt.Errorf("series not 16-byte aligned at %d", startPos)
}
offsets = append(offsets, uint32(startPos/seriesByteAlign))
// Skip to next series.
x := d.Uvarint()
d.Skip(x + crc32.Size)
if err := d.Err(); err != nil {
return err
}
}
if err := w.writePosting("", "", offsets); err != nil {
return err
}
maxPostings := uint64(len(offsets)) // No label name can have more postings than this.
for len(names) > 0 {
batchNames := []string{}
var c uint64
// Try to batch multiple label names into one pass, but never
// use more memory than the postings of a single label name could need.
for len(names) > 0 {
if w.labelNames[names[0]]+c > maxPostings {
if c > 0 {
break
}
return fmt.Errorf("corruption detected when writing postings to index: label %q has %d uses, but maxPostings is %d", names[0], w.labelNames[names[0]], maxPostings)
}
batchNames = append(batchNames, names[0])
c += w.labelNames[names[0]]
names = names[1:]
}
nameSymbols := map[uint32]string{}
for _, name := range batchNames {
sid, ok := w.symbolCache[name]
if !ok {
return fmt.Errorf("symbol entry for %q does not exist", name)
}
nameSymbols[sid] = name
}
// Label name -> label value -> positions.
postings := map[uint32]map[uint32][]uint32{}
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))
d.Skip(int(w.toc.Series))
for d.Len() > 0 {
d.ConsumePadding()
startPos := w.toc.LabelIndices - uint64(d.Len())
l := d.Uvarint() // Length of this series in bytes.
startLen := d.Len()
// See if label names we want are in the series.
numLabels := d.Uvarint()
for range numLabels {
lno := uint32(d.Uvarint())
lvo := uint32(d.Uvarint())
if _, ok := nameSymbols[lno]; ok {
if _, ok := postings[lno]; !ok {
postings[lno] = map[uint32][]uint32{}
}
postings[lno][lvo] = append(postings[lno][lvo], uint32(startPos/seriesByteAlign))
}
}
// Skip to next series.
d.Skip(l - (startLen - d.Len()) + crc32.Size)
if err := d.Err(); err != nil {
return err
}
}
for _, name := range batchNames {
// Write out postings for this label name.
sid, ok := w.symbolCache[name]
if !ok {
return fmt.Errorf("symbol entry for %q does not exist", name)
}
values := make([]uint32, 0, len(postings[sid]))
for v := range postings[sid] {
values = append(values, v)
}
// Symbol numbers are in order, so the strings will also be in order.
slices.Sort(values)
for _, v := range values {
value, err := w.symbols.Lookup(v)
if err != nil {
return err
}
if err := w.writePosting(name, value, postings[sid][v]); err != nil {
return err
}
}
}
select {
case <-w.ctx.Done():
return w.ctx.Err()
default:
}
}
return nil
}
// EncodePostingsRaw uses the "basic" postings list encoding format with no compression:
// <BE uint32 len X><BE uint32 0><BE uint32 1>...<BE uint32 X-1>.
func EncodePostingsRaw(e *encoding.Encbuf, offs []uint32) error {
e.PutBE32int(len(offs))
for _, off := range offs {
if off > (1<<32)-1 {
return fmt.Errorf("series offset %d exceeds 4 bytes", off)
}
e.PutBE32(off)
}
return nil
}
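// As a concrete illustration of the format above, EncodePostingsRaw with
// offs = []uint32{3, 10} appends the bytes
//
//	00 00 00 02  00 00 00 03  00 00 00 0a
//
// i.e. a big-endian uint32 count followed by one big-endian uint32 per offset.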
func (w *Writer) writePosting(name, value string, offs []uint32) error {
// Align beginning to 4 bytes for more efficient postings list scans.
if err := w.fP.AddPadding(4); err != nil {
return err
}
// Write out postings offset table to temporary file as we go.
w.buf1.Reset()
w.buf1.PutUvarint(2)
w.buf1.PutUvarintStr(name)
w.buf1.PutUvarintStr(value)
w.buf1.PutUvarint64(w.fP.pos) // This is relative to the postings tmp file, not the final index file.
if err := w.fPO.Write(w.buf1.Get()); err != nil {
return err
}
w.cntPO++
w.buf1.Reset()
if err := w.postingsEncoder(&w.buf1, offs); err != nil {
return err
}
w.buf2.Reset()
l := w.buf1.Len()
// We convert to uint to make code compile on 32-bit systems, as math.MaxUint32 doesn't fit into int there.
if uint(l) > math.MaxUint32 {
return fmt.Errorf("posting size exceeds 4 bytes: %d", l)
}
w.buf2.PutBE32int(l)
w.buf1.PutHash(w.crc32)
return w.fP.Write(w.buf2.Get(), w.buf1.Get())
}
func (w *Writer) writePostings() error {
// There's padding in the tmp file; make sure the same alignment carries over here.
if err := w.f.AddPadding(4); err != nil {
return err
}
w.postingsStart = w.f.pos
// Copy temporary file into main index.
if err := w.fP.Flush(); err != nil {
return err
}
if _, err := w.fP.f.Seek(0, 0); err != nil {
return err
}
// No checksum needs to be calculated here, so we can copy directly.
n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, make([]byte, 1<<20))
if err != nil {
return err
}
if uint64(n) != w.fP.pos {
return fmt.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n)
}
w.f.pos += uint64(n)
if err := w.fP.Close(); err != nil {
return err
}
if err := w.fP.Remove(); err != nil {
return err
}
w.fP = nil
return nil
}
func (w *Writer) Close() error {
// Even if this fails, we need to close all the files.
ensureErr := w.ensureStage(idxStageDone)
if w.symbolFile != nil {
if err := w.symbolFile.Close(); err != nil {
return err
}
}
if w.fP != nil {
if err := w.fP.Close(); err != nil {
return err
}
}
if w.fPO != nil {
if err := w.fPO.Close(); err != nil {
return err
}
}
if err := w.f.Close(); err != nil {
return err
}
return ensureErr
}
// StringIter iterates over a sorted list of strings.
type StringIter interface {
// Next advances the iterator and returns true if another value was found.
Next() bool
// At returns the value at the current iterator position.
At() string
// Err returns the last error of the iterator.
Err() error
}
type Reader struct {
b ByteSlice
toc *TOC
// Closer that releases the underlying resources of the byte slice.
c io.Closer
// Map of label name to a list of some label values' positions in the offset table.
// The first and last values for each name are always present.
postings map[string][]postingOffset
// For the v1 format, labelname -> labelvalue -> offset.
postingsV1 map[string]map[string]uint64
symbols *Symbols
nameSymbols map[uint32]string // Cache of the label name symbol lookups,
// as there are not many and they are half of all lookups.
st *labels.SymbolTable // TODO: see if we can merge this with nameSymbols.
dec *Decoder
version int
}
type postingOffset struct {
value string
off int
}
// ByteSlice abstracts a byte slice.
type ByteSlice interface {
Len() int
Range(start, end int) []byte
}
type realByteSlice []byte
func (b realByteSlice) Len() int {
return len(b)
}
func (b realByteSlice) Range(start, end int) []byte {
return b[start:end]
}
func (b realByteSlice) Sub(start, end int) ByteSlice {
return b[start:end]
}
// NewReader returns a new index reader on the given byte slice. It automatically
// handles different format versions.
func NewReader(b ByteSlice, decoder PostingsDecoder) (*Reader, error) {
return newReader(b, io.NopCloser(nil), decoder)
}
// NewFileReader returns a new index reader against the given index file.
func NewFileReader(path string, decoder PostingsDecoder) (*Reader, error) {
f, err := fileutil.OpenMmapFile(path)
if err != nil {
return nil, err
}
r, err := newReader(realByteSlice(f.Bytes()), f, decoder)
if err != nil {
return nil, tsdb_errors.NewMulti(
err,
f.Close(),
).Err()
}
return r, nil
}
func newReader(b ByteSlice, c io.Closer, postingsDecoder PostingsDecoder) (*Reader, error) {
r := &Reader{
b: b,
c: c,
postings: map[string][]postingOffset{},
st: labels.NewSymbolTable(),
}
// Verify header.
if r.b.Len() < HeaderLen {
return nil, fmt.Errorf("index header: %w", encoding.ErrInvalidSize)
}
if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex {
return nil, fmt.Errorf("invalid magic number %x", m)
}
r.version = int(r.b.Range(4, 5)[0])
switch r.version {
case FormatV1, FormatV2, FormatV3:
default:
return nil, fmt.Errorf("unknown index file version %d", r.version)
}
var err error
r.toc, err = NewTOCFromByteSlice(b)
if err != nil {
return nil, fmt.Errorf("read TOC: %w", err)
}
r.symbols, err = NewSymbols(r.b, r.version, int(r.toc.Symbols))
if err != nil {
return nil, fmt.Errorf("read symbols: %w", err)
}
if r.version == FormatV1 {
// Earlier V1 formats don't have a sorted postings offset table, so
// load the whole offset table into memory.
r.postingsV1 = map[string]map[string]uint64{}
if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error {
if _, ok := r.postingsV1[string(name)]; !ok {
r.postingsV1[string(name)] = map[string]uint64{}
r.postings[string(name)] = nil // Used to get a list of labelnames in places.
}
r.postingsV1[string(name)][string(value)] = off
return nil
}); err != nil {
return nil, fmt.Errorf("read postings table: %w", err)
}
} else {
var lastName, lastValue []byte
lastOff := 0
valueCount := 0
// For the postings offset table we keep every label name but only every nth
// label value (plus the first and last one), to save memory.
if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, _ uint64, off int) error {
if _, ok := r.postings[string(name)]; !ok {
// Next label name.
r.postings[string(name)] = []postingOffset{}
if lastName != nil {
// Always include last value for each label name.
r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff})
}
valueCount = 0
}
if valueCount%symbolFactor == 0 {
r.postings[string(name)] = append(r.postings[string(name)], postingOffset{value: string(value), off: off})
lastName, lastValue = nil, nil
} else {
lastName, lastValue = name, value
lastOff = off
}
valueCount++
return nil
}); err != nil {
return nil, fmt.Errorf("read postings table: %w", err)
}
if lastName != nil {
r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff})
}
// Trim any extra space in the slices.
for k, v := range r.postings {
l := make([]postingOffset, len(v))
copy(l, v)
r.postings[k] = l
}
}
r.nameSymbols = make(map[uint32]string, len(r.postings))
for k := range r.postings {
if k == "" {
continue
}
off, err := r.symbols.ReverseLookup(k)
if err != nil {
return nil, fmt.Errorf("reverse symbol lookup: %w", err)
}
r.nameSymbols[off] = k
}
r.dec = &Decoder{LookupSymbol: r.lookupSymbol, DecodePostings: postingsDecoder}
return r, nil
}
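// As a sketch of the sampling above (v2+ indexes): for a label name with
// values v0, v1, ..., vN, only v0, v32, v64, ... and vN end up in r.postings.
// Value lookups can then start at the nearest sampled entry and scan forward
// through the on-disk offset table, trading a short scan for far less memory.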
// Version returns the file format version of the underlying index.
func (r *Reader) Version() int {
return r.version
}
// Range marks a byte range.
type Range struct {
Start, End int64
}
// PostingsRanges returns a new map of byte ranges in the underlying index file
// for all postings lists.
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) {
m := map[labels.Label]Range{}
if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error {
d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
if d.Err() != nil {
return d.Err()
}
m[labels.Label{Name: string(name), Value: string(value)}] = Range{
Start: int64(off) + 4,
End: int64(off) + 4 + int64(d.Len()),
}
return nil
}); err != nil {
return nil, fmt.Errorf("read postings table: %w", err)
}
return m, nil
}
type Symbols struct {
bs ByteSlice
version int
off int
offsets []int
seen int
}
const symbolFactor = 32
// NewSymbols returns a Symbols object for symbol lookups.
func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) {
s := &Symbols{
bs: bs,
version: version,
off: off,
}
d := encoding.NewDecbufAt(bs, off, castagnoliTable)
var (
origLen = d.Len()
cnt = d.Be32int()
basePos = off + 4
)
s.offsets = make([]int, 0, 1+cnt/symbolFactor)
for d.Err() == nil && s.seen < cnt {
if s.seen%symbolFactor == 0 {
s.offsets = append(s.offsets, basePos+origLen-d.Len())
}
d.UvarintBytes() // The symbol.
s.seen++
}
if d.Err() != nil {
return nil, d.Err()
}
return s, nil
}
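// As a sketch of the sampling above: with symbolFactor == 32, s.offsets holds
// the position of every 32nd symbol. Lookup(o) jumps to offsets[o/32] and
// skips o%32 length-prefixed symbols, trading at most 31 extra decodes for a
// 32x smaller offsets slice.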
func (s Symbols) Lookup(o uint32) (string, error) {
d := encoding.Decbuf{
B: s.bs.Range(0, s.bs.Len()),
}
if s.version == FormatV1 {
d.Skip(int(o))
} else {
if int(o) >= s.seen {
return "", fmt.Errorf("unknown symbol offset %d", o)
}
d.Skip(s.offsets[int(o/symbolFactor)])
// Walk until we find the one we want.
for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- {
d.UvarintBytes()
}
}
sym := d.UvarintStr()
if d.Err() != nil {
return "", d.Err()
}
return sym, nil
}
func (s Symbols) ReverseLookup(sym string) (uint32, error) {
if len(s.offsets) == 0 {
return 0, fmt.Errorf("unknown symbol %q - no symbols", sym)
}
i := sort.Search(len(s.offsets), func(i int) bool {
// Any decoding errors here will be lost; however,
// we already read through all of this at startup.
d := encoding.Decbuf{
B: s.bs.Range(0, s.bs.Len()),
}
d.Skip(s.offsets[i])
return yoloString(d.UvarintBytes()) > sym
})
d := encoding.Decbuf{
B: s.bs.Range(0, s.bs.Len()),
}
if i > 0 {
i--
}
d.Skip(s.offsets[i])
res := i * symbolFactor
var lastLen int
var lastSymbol string
for d.Err() == nil && res <= s.seen {
lastLen = d.Len()
lastSymbol = yoloString(d.UvarintBytes())
if lastSymbol >= sym {
break
}
res++
}
if d.Err() != nil {
return 0, d.Err()
}
if lastSymbol != sym {
return 0, fmt.Errorf("unknown symbol %q", sym)
}
if s.version == FormatV1 {
return uint32(s.bs.Len() - lastLen), nil
}
return uint32(res), nil
}
func (s Symbols) Size() int {
return len(s.offsets) * 8
}
func (s Symbols) Iter() StringIter {
d := encoding.NewDecbufAt(s.bs, s.off, castagnoliTable)
cnt := d.Be32int()
return &symbolsIter{
d: d,
cnt: cnt,
}
}
// symbolsIter implements StringIter.
type symbolsIter struct {
d encoding.Decbuf
cnt int
cur string
err error
}
func (s *symbolsIter) Next() bool {
if s.cnt == 0 || s.err != nil {
return false
}
s.cur = yoloString(s.d.UvarintBytes())
s.cnt--
if s.d.Err() != nil {
s.err = s.d.Err()
return false
}
return true
}
func (s symbolsIter) At() string { return s.cur }
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/index/postingsstats.go | tsdb/index/postingsstats.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"math"
"slices"
)
// Stat holds values for a single cardinality statistic.
type Stat struct {
Name string
Count uint64
}
type maxHeap struct {
maxLength int
minValue uint64
minIndex int
Items []Stat
}
func (m *maxHeap) init(length int) {
m.maxLength = length
m.minValue = math.MaxUint64
m.Items = make([]Stat, 0, length)
}
func (m *maxHeap) push(item Stat) {
if len(m.Items) < m.maxLength {
if item.Count < m.minValue {
m.minValue = item.Count
m.minIndex = len(m.Items)
}
m.Items = append(m.Items, item)
return
}
if item.Count < m.minValue {
return
}
m.Items[m.minIndex] = item
m.minValue = item.Count
for i, stat := range m.Items {
if stat.Count < m.minValue {
m.minValue = stat.Count
m.minIndex = i
}
}
}
func (m *maxHeap) get() []Stat {
slices.SortFunc(m.Items, func(a, b Stat) int {
switch {
case b.Count < a.Count:
return -1
case b.Count > a.Count:
return 1
default:
return 0
}
})
return m.Items
}
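// A rough usage sketch (hypothetical caller, not exported API): collecting
// the top-3 stats by count.
//
//	var h maxHeap
//	h.init(3)
//	h.push(Stat{Name: "a", Count: 5})
//	h.push(Stat{Name: "b", Count: 1})
//	h.push(Stat{Name: "c", Count: 9})
//	h.push(Stat{Name: "d", Count: 7}) // Evicts "b", the current minimum.
//	top := h.get()                    // [{c 9} {d 7} {a 5}], sorted descending.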
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/index/postings_test.go | tsdb/index/postings_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"container/heap"
"context"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"slices"
"sort"
"strconv"
"strings"
"sync"
"testing"
"github.com/grafana/regexp"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMemPostings_addFor(t *testing.T) {
p := NewMemPostings()
p.m[allPostingsKey.Name] = map[string][]storage.SeriesRef{}
p.m[allPostingsKey.Name][allPostingsKey.Value] = []storage.SeriesRef{1, 2, 3, 4, 6, 7, 8}
p.addFor(5, allPostingsKey)
require.Equal(t, []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey.Name][allPostingsKey.Value])
}
func TestMemPostings_ensureOrder(t *testing.T) {
p := NewUnorderedMemPostings()
p.m["a"] = map[string][]storage.SeriesRef{}
for i := range 100 {
l := make([]storage.SeriesRef, 100)
for j := range l {
l[j] = storage.SeriesRef(rand.Uint64())
}
v := strconv.Itoa(i)
p.m["a"][v] = l
}
p.EnsureOrder(0)
for _, e := range p.m {
for _, l := range e {
ok := slices.IsSorted(l)
require.True(t, ok, "postings list %v is not sorted", l)
}
}
}
func BenchmarkMemPostings_ensureOrder(b *testing.B) {
tests := map[string]struct {
numLabels int
numValuesPerLabel int
numRefsPerValue int
}{
"many values per label": {
numLabels: 100,
numValuesPerLabel: 10000,
numRefsPerValue: 100,
},
"few values per label": {
numLabels: 1000000,
numValuesPerLabel: 1,
numRefsPerValue: 100,
},
"few refs per label value": {
numLabels: 1000,
numValuesPerLabel: 1000,
numRefsPerValue: 10,
},
}
for testName, testData := range tests {
b.Run(testName, func(b *testing.B) {
p := NewUnorderedMemPostings()
// Generate postings.
for l := 0; l < testData.numLabels; l++ {
labelName := strconv.Itoa(l)
p.m[labelName] = map[string][]storage.SeriesRef{}
for v := 0; v < testData.numValuesPerLabel; v++ {
refs := make([]storage.SeriesRef, testData.numRefsPerValue)
for j := range refs {
refs[j] = storage.SeriesRef(rand.Uint64())
}
labelValue := strconv.Itoa(v)
p.m[labelName][labelValue] = refs
}
}
b.ResetTimer()
for b.Loop() {
p.EnsureOrder(0)
p.ordered = false
}
})
}
}
func TestIntersect(t *testing.T) {
a := newListPostings(1, 2, 3)
b := newListPostings(2, 3, 4)
cases := []struct {
in []Postings
res Postings
}{
{
in: []Postings{},
res: EmptyPostings(),
},
{
in: []Postings{a, b, EmptyPostings()},
res: EmptyPostings(),
},
{
in: []Postings{b, a, EmptyPostings()},
res: EmptyPostings(),
},
{
in: []Postings{EmptyPostings(), b, a},
res: EmptyPostings(),
},
{
in: []Postings{EmptyPostings(), a, b},
res: EmptyPostings(),
},
{
in: []Postings{a, EmptyPostings(), b},
res: EmptyPostings(),
},
{
in: []Postings{b, EmptyPostings(), a},
res: EmptyPostings(),
},
{
in: []Postings{b, EmptyPostings(), a, a, b, a, a, a},
res: EmptyPostings(),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 5),
newListPostings(6, 7, 8, 9, 10),
},
res: newListPostings(),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 5),
newListPostings(4, 5, 6, 7, 8),
},
res: newListPostings(4, 5),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 9, 10),
newListPostings(1, 4, 5, 6, 7, 8, 10, 11),
},
res: newListPostings(1, 4, 10),
},
{
in: []Postings{
newListPostings(1),
newListPostings(0, 1),
},
res: newListPostings(1),
},
{
in: []Postings{
newListPostings(1),
},
res: newListPostings(1),
},
{
in: []Postings{
newListPostings(1),
newListPostings(),
},
res: newListPostings(),
},
{
in: []Postings{
newListPostings(),
newListPostings(),
},
res: newListPostings(),
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
require.NotNil(t, c.res, "intersect result expectancy cannot be nil")
expected, err := ExpandPostings(c.res)
require.NoError(t, err)
i := Intersect(c.in...)
if c.res == EmptyPostings() {
require.Equal(t, EmptyPostings(), i)
return
}
require.NotEqual(t, EmptyPostings(), i, "intersect unexpected result: EmptyPostings sentinel")
res, err := ExpandPostings(i)
require.NoError(t, err)
require.Equal(t, expected, res)
})
}
}
func TestMultiIntersect(t *testing.T) {
cases := []struct {
p [][]storage.SeriesRef
res []storage.SeriesRef
}{
{
p: [][]storage.SeriesRef{
{1, 2, 3, 4, 5, 6, 1000, 1001},
{2, 4, 5, 6, 7, 8, 999, 1001},
{1, 2, 5, 6, 7, 8, 1001, 1200},
},
res: []storage.SeriesRef{2, 5, 6, 1001},
},
// One of the reproducible cases for:
// https://github.com/prometheus/prometheus/issues/2616
// The initialisation of intersectPostings was moving the iterator forward
// prematurely, making us miss some postings.
{
p: [][]storage.SeriesRef{
{1, 2},
{1, 2},
{1, 2},
{2},
},
res: []storage.SeriesRef{2},
},
}
for _, c := range cases {
ps := make([]Postings, 0, len(c.p))
for _, postings := range c.p {
ps = append(ps, newListPostings(postings...))
}
res, err := ExpandPostings(Intersect(ps...))
require.NoError(t, err)
require.Equal(t, c.res, res)
}
}
func consumePostings(p Postings) error {
for p.Next() {
p.At()
}
return p.Err()
}
func newListPostings(list ...storage.SeriesRef) *listPostings {
if !slices.IsSorted(list) {
panic("newListPostings: list is not sorted")
}
return &listPostings{list: list}
}
// Create ListPostings for a benchmark, collecting the original sets of references
// so they can be reset without additional memory allocations.
func createPostings(lps *[]*listPostings, refs *[][]storage.SeriesRef, params ...storage.SeriesRef) {
var temp []storage.SeriesRef
for i := 0; i < len(params); i += 3 {
for j := params[i]; j < params[i+1]; j += params[i+2] {
temp = append(temp, j)
}
}
*lps = append(*lps, newListPostings(temp...))
*refs = append(*refs, temp)
}
// Reset the ListPostings to their original values each time round the benchmark loop.
func resetPostings(its []Postings, lps []*listPostings, refs [][]storage.SeriesRef) {
for j := range refs {
lps[j].list = refs[j]
its[j] = lps[j]
}
}
func BenchmarkIntersect(t *testing.B) {
t.Run("LongPostings1", func(bench *testing.B) {
var lps []*listPostings
var refs [][]storage.SeriesRef
createPostings(&lps, &refs, 0, 10000000, 2)
createPostings(&lps, &refs, 5000000, 5000100, 4, 5090000, 5090600, 4)
createPostings(&lps, &refs, 4990000, 5100000, 1)
createPostings(&lps, &refs, 4000000, 6000000, 1)
its := make([]Postings, len(refs))
bench.ResetTimer()
bench.ReportAllocs()
for bench.Loop() {
resetPostings(its, lps, refs)
if err := consumePostings(Intersect(its...)); err != nil {
bench.Fatal(err)
}
}
})
t.Run("LongPostings2", func(bench *testing.B) {
var lps []*listPostings
var refs [][]storage.SeriesRef
createPostings(&lps, &refs, 0, 12500000, 1)
createPostings(&lps, &refs, 7500000, 12500000, 1)
createPostings(&lps, &refs, 9000000, 20000000, 1)
createPostings(&lps, &refs, 10000000, 12000000, 1)
its := make([]Postings, len(refs))
bench.ResetTimer()
bench.ReportAllocs()
for bench.Loop() {
resetPostings(its, lps, refs)
if err := consumePostings(Intersect(its...)); err != nil {
bench.Fatal(err)
}
}
})
t.Run("ManyPostings", func(bench *testing.B) {
var lps []*listPostings
var refs [][]storage.SeriesRef
for range 100 {
createPostings(&lps, &refs, 1, 100, 1)
}
its := make([]Postings, len(refs))
bench.ResetTimer()
bench.ReportAllocs()
for bench.Loop() {
resetPostings(its, lps, refs)
if err := consumePostings(Intersect(its...)); err != nil {
bench.Fatal(err)
}
}
})
}
func BenchmarkMerge(t *testing.B) {
var lps []*listPostings
var refs [][]storage.SeriesRef
// Create 100000 matchers (k=100000), making sure all memory allocation is done before starting the loop.
for i := range 100000 {
var temp []storage.SeriesRef
for j := 1; j < 100; j++ {
temp = append(temp, storage.SeriesRef(i+j*100000))
}
lps = append(lps, newListPostings(temp...))
refs = append(refs, temp)
}
its := make([]*listPostings, len(refs))
for _, nSeries := range []int{1, 10, 10000, 100000} {
t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
ctx := context.Background()
for bench.Loop() {
// Reset the ListPostings to their original values each time round the loop.
for j := range refs[:nSeries] {
lps[j].list = refs[j]
its[j] = lps[j]
}
if err := consumePostings(Merge(ctx, its[:nSeries]...)); err != nil {
bench.Fatal(err)
}
}
})
}
}
func TestMultiMerge(t *testing.T) {
i1 := newListPostings(1, 2, 3, 4, 5, 6, 1000, 1001)
i2 := newListPostings(2, 4, 5, 6, 7, 8, 999, 1001)
i3 := newListPostings(1, 2, 5, 6, 7, 8, 1001, 1200)
res, err := ExpandPostings(Merge(context.Background(), i1, i2, i3))
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res)
}
func TestMergedPostings(t *testing.T) {
cases := []struct {
in []Postings
res Postings
}{
{
in: []Postings{},
res: EmptyPostings(),
},
{
in: []Postings{
newListPostings(),
newListPostings(),
},
res: EmptyPostings(),
},
{
in: []Postings{
newListPostings(),
},
res: newListPostings(),
},
{
in: []Postings{
EmptyPostings(),
EmptyPostings(),
EmptyPostings(),
EmptyPostings(),
},
res: EmptyPostings(),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 5),
newListPostings(6, 7, 8, 9, 10),
},
res: newListPostings(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 5),
newListPostings(4, 5, 6, 7, 8),
},
res: newListPostings(1, 2, 3, 4, 5, 6, 7, 8),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 9, 10),
newListPostings(1, 4, 5, 6, 7, 8, 10, 11),
},
res: newListPostings(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
{
in: []Postings{
newListPostings(1, 2, 3, 4, 9, 10),
EmptyPostings(),
newListPostings(1, 4, 5, 6, 7, 8, 10, 11),
},
res: newListPostings(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
{
in: []Postings{
newListPostings(1, 2),
newListPostings(),
},
res: newListPostings(1, 2),
},
{
in: []Postings{
newListPostings(1, 2),
EmptyPostings(),
},
res: newListPostings(1, 2),
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
require.NotNil(t, c.res, "merge result expectancy cannot be nil")
ctx := context.Background()
expected, err := ExpandPostings(c.res)
require.NoError(t, err)
m := Merge(ctx, c.in...)
if c.res == EmptyPostings() {
require.False(t, m.Next())
return
}
require.NotEqual(t, EmptyPostings(), m, "merge unexpected result: EmptyPostings sentinel")
res, err := ExpandPostings(m)
require.NoError(t, err)
require.Equal(t, expected, res)
})
}
}
func TestMergedPostingsSeek(t *testing.T) {
cases := []struct {
a, b []storage.SeriesRef
seek storage.SeriesRef
success bool
res []storage.SeriesRef
}{
{
a: []storage.SeriesRef{2, 3, 4, 5},
b: []storage.SeriesRef{6, 7, 8, 9, 10},
seek: 1,
success: true,
res: []storage.SeriesRef{2, 3, 4, 5, 6, 7, 8, 9, 10},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 5},
b: []storage.SeriesRef{6, 7, 8, 9, 10},
seek: 2,
success: true,
res: []storage.SeriesRef{2, 3, 4, 5, 6, 7, 8, 9, 10},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 5},
b: []storage.SeriesRef{4, 5, 6, 7, 8},
seek: 9,
success: false,
res: nil,
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 10, 11},
seek: 10,
success: true,
res: []storage.SeriesRef{10, 11},
},
}
for _, c := range cases {
ctx := context.Background()
a := newListPostings(c.a...)
b := newListPostings(c.b...)
p := Merge(ctx, a, b)
require.Equal(t, c.success, p.Seek(c.seek))
// After Seek(), At() should be called.
if c.success {
start := p.At()
lst, err := ExpandPostings(p)
require.NoError(t, err)
lst = append([]storage.SeriesRef{start}, lst...)
require.Equal(t, c.res, lst)
}
}
}
func TestRemovedPostings(t *testing.T) {
cases := []struct {
a, b []storage.SeriesRef
res []storage.SeriesRef
}{
{
a: nil,
b: nil,
res: []storage.SeriesRef(nil),
},
{
a: []storage.SeriesRef{1, 2, 3, 4},
b: nil,
res: []storage.SeriesRef{1, 2, 3, 4},
},
{
a: nil,
b: []storage.SeriesRef{1, 2, 3, 4},
res: []storage.SeriesRef(nil),
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 5},
b: []storage.SeriesRef{6, 7, 8, 9, 10},
res: []storage.SeriesRef{1, 2, 3, 4, 5},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 5},
b: []storage.SeriesRef{4, 5, 6, 7, 8},
res: []storage.SeriesRef{1, 2, 3},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 10, 11},
res: []storage.SeriesRef{2, 3, 9},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
res: []storage.SeriesRef(nil),
},
}
for _, c := range cases {
a := newListPostings(c.a...)
b := newListPostings(c.b...)
res, err := ExpandPostings(newRemovedPostings(a, b))
require.NoError(t, err)
require.Equal(t, c.res, res)
}
}
func TestRemovedNextStackoverflow(t *testing.T) {
var full []storage.SeriesRef
var remove []storage.SeriesRef
for i := range storage.SeriesRef(1e7) {
full = append(full, i)
remove = append(remove, i)
}
flp := newListPostings(full...)
rlp := newListPostings(remove...)
rp := newRemovedPostings(flp, rlp)
gotElem := false
for rp.Next() {
gotElem = true
}
require.NoError(t, rp.Err())
require.False(t, gotElem)
}
func TestRemovedPostingsSeek(t *testing.T) {
cases := []struct {
a, b []storage.SeriesRef
seek storage.SeriesRef
success bool
res []storage.SeriesRef
}{
{
a: []storage.SeriesRef{2, 3, 4, 5},
b: []storage.SeriesRef{6, 7, 8, 9, 10},
seek: 1,
success: true,
res: []storage.SeriesRef{2, 3, 4, 5},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 5},
b: []storage.SeriesRef{6, 7, 8, 9, 10},
seek: 2,
success: true,
res: []storage.SeriesRef{2, 3, 4, 5},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 5},
b: []storage.SeriesRef{4, 5, 6, 7, 8},
seek: 9,
success: false,
res: nil,
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 10, 11},
seek: 10,
success: false,
res: nil,
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 11},
seek: 4,
success: true,
res: []storage.SeriesRef{9, 10},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 11},
seek: 5,
success: true,
res: []storage.SeriesRef{9, 10},
},
{
a: []storage.SeriesRef{1, 2, 3, 4, 9, 10},
b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 11},
seek: 10,
success: true,
res: []storage.SeriesRef{10},
},
}
for _, c := range cases {
a := newListPostings(c.a...)
b := newListPostings(c.b...)
p := newRemovedPostings(a, b)
require.Equal(t, c.success, p.Seek(c.seek))
// After Seek(), At() should be called.
if c.success {
start := p.At()
lst, err := ExpandPostings(p)
require.NoError(t, err)
lst = append([]storage.SeriesRef{start}, lst...)
require.Equal(t, c.res, lst)
}
}
}
func TestBigEndian(t *testing.T) {
num := 1000
// mock a list as postings
ls := make([]uint32, num)
ls[0] = 2
for i := 1; i < num; i++ {
ls[i] = ls[i-1] + uint32(rand.Int31n(25)) + 2
}
beLst := make([]byte, num*4)
for i := range num {
b := beLst[i*4 : i*4+4]
binary.BigEndian.PutUint32(b, ls[i])
}
t.Run("Iteration", func(t *testing.T) {
bep := newBigEndianPostings(beLst)
for i := range num {
require.True(t, bep.Next())
require.Equal(t, storage.SeriesRef(ls[i]), bep.At())
}
require.False(t, bep.Next())
require.NoError(t, bep.Err())
})
t.Run("Seek", func(t *testing.T) {
table := []struct {
seek uint32
val uint32
found bool
}{
{
ls[0] - 1, ls[0], true,
},
{
ls[4], ls[4], true,
},
{
ls[500] - 1, ls[500], true,
},
{
ls[600] + 1, ls[601], true,
},
{
ls[600] + 1, ls[601], true,
},
{
ls[600] + 1, ls[601], true,
},
{
ls[0], ls[601], true,
},
{
ls[600], ls[601], true,
},
{
ls[999], ls[999], true,
},
{
ls[999] + 10, ls[999], false,
},
}
bep := newBigEndianPostings(beLst)
for _, v := range table {
require.Equal(t, v.found, bep.Seek(storage.SeriesRef(v.seek)))
require.Equal(t, storage.SeriesRef(v.val), bep.At())
require.NoError(t, bep.Err())
}
})
}
func TestIntersectWithMerge(t *testing.T) {
// One of the reproducible cases for:
// https://github.com/prometheus/prometheus/issues/2616
a := newListPostings(21, 22, 23, 24, 25, 30)
b := Merge(
context.Background(),
newListPostings(10, 20, 30),
newListPostings(15, 26, 30),
)
p := Intersect(a, b)
res, err := ExpandPostings(p)
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{30}, res)
}
func TestWithoutPostings(t *testing.T) {
cases := []struct {
base Postings
drop Postings
res Postings
}{
{
base: EmptyPostings(),
drop: EmptyPostings(),
res: EmptyPostings(),
},
{
base: EmptyPostings(),
drop: newListPostings(1, 2),
res: EmptyPostings(),
},
{
base: newListPostings(1, 2),
drop: EmptyPostings(),
res: newListPostings(1, 2),
},
{
base: newListPostings(),
drop: newListPostings(),
res: newListPostings(),
},
{
base: newListPostings(1, 2, 3),
drop: newListPostings(),
res: newListPostings(1, 2, 3),
},
{
base: newListPostings(1, 2, 3),
drop: newListPostings(4, 5, 6),
res: newListPostings(1, 2, 3),
},
{
base: newListPostings(1, 2, 3),
drop: newListPostings(3, 4, 5),
res: newListPostings(1, 2),
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
require.NotNil(t, c.res, "without result expectancy cannot be nil")
expected, err := ExpandPostings(c.res)
require.NoError(t, err)
w := Without(c.base, c.drop)
if c.res == EmptyPostings() {
require.Equal(t, EmptyPostings(), w)
return
}
require.NotEqual(t, EmptyPostings(), w, "without unexpected result: EmptyPostings sentinel")
res, err := ExpandPostings(w)
require.NoError(t, err)
require.Equal(t, expected, res)
})
}
}
func BenchmarkPostings_Stats(b *testing.B) {
p := NewMemPostings()
var seriesID storage.SeriesRef
createPostingsLabelValues := func(name, valuePrefix string, count int) {
for n := 1; n < count; n++ {
value := fmt.Sprintf("%s-%d", valuePrefix, n)
p.Add(seriesID, labels.FromStrings(name, value))
seriesID++
}
}
createPostingsLabelValues("__name__", "metrics_name_can_be_very_big_and_bad", 1e3)
for i := range 20 {
createPostingsLabelValues(fmt.Sprintf("host-%d", i), "metrics_name_can_be_very_big_and_bad", 1e3)
createPostingsLabelValues(fmt.Sprintf("instance-%d", i), "10.0.IP.", 1e3)
createPostingsLabelValues(fmt.Sprintf("job-%d", i), "Small_Job_name", 1e3)
createPostingsLabelValues(fmt.Sprintf("err-%d", i), "avg_namespace-", 1e3)
createPostingsLabelValues(fmt.Sprintf("team-%d", i), "team-", 1e3)
createPostingsLabelValues(fmt.Sprintf("container_name-%d", i), "pod-", 1e3)
createPostingsLabelValues(fmt.Sprintf("cluster-%d", i), "newcluster-", 1e3)
createPostingsLabelValues(fmt.Sprintf("uid-%d", i), "123412312312312311-", 1e3)
createPostingsLabelValues(fmt.Sprintf("area-%d", i), "new_area_of_work-", 1e3)
createPostingsLabelValues(fmt.Sprintf("request_id-%d", i), "owner_name_work-", 1e3)
}
for b.Loop() {
p.Stats("__name__", 10, labels.SizeOfLabels)
}
}
func TestMemPostingsStats(t *testing.T) {
// create a new MemPostings
p := NewMemPostings()
// add some postings to the MemPostings
p.Add(1, labels.FromStrings("label", "value1"))
p.Add(1, labels.FromStrings("label", "value2"))
p.Add(1, labels.FromStrings("label", "value3"))
p.Add(2, labels.FromStrings("label", "value1"))
// call the Stats method to calculate the cardinality statistics
// passing a fake calculation so we get the same result regardless of build tags (-tags).
stats := p.Stats("label", 10, func(name, value string, n uint64) uint64 { return uint64(len(name)+len(value)) * n })
// assert that the expected statistics were calculated
require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count)
require.Equal(t, "value1", stats.CardinalityMetricsStats[0].Name)
require.Equal(t, uint64(3), stats.CardinalityLabelStats[0].Count)
require.Equal(t, "label", stats.CardinalityLabelStats[0].Name)
require.Equal(t, uint64(44), stats.LabelValueStats[0].Count)
require.Equal(t, "label", stats.LabelValueStats[0].Name)
require.Equal(t, uint64(2), stats.LabelValuePairsStats[0].Count)
require.Equal(t, "label=value1", stats.LabelValuePairsStats[0].Name)
require.Equal(t, 3, stats.NumLabelPairs)
}
func TestMemPostings_Delete(t *testing.T) {
p := NewMemPostings()
p.Add(1, labels.FromStrings("lbl1", "a"))
p.Add(2, labels.FromStrings("lbl1", "b"))
p.Add(3, labels.FromStrings("lbl2", "a"))
before := p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value)
deletedRefs := map[storage.SeriesRef]struct{}{
2: {},
}
affectedLabels := map[labels.Label]struct{}{
{Name: "lbl1", Value: "b"}: {},
}
p.Delete(deletedRefs, affectedLabels)
after := p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value)
// Make sure postings gotten before the delete have the old data when
// iterated over.
expanded, err := ExpandPostings(before)
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded)
// Make sure postings gotten after the delete have the new data when
// iterated over.
expanded, err = ExpandPostings(after)
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{1, 3}, expanded)
deleted := p.Postings(context.Background(), "lbl1", "b")
expanded, err = ExpandPostings(deleted)
require.NoError(t, err)
require.Empty(t, expanded, "expected empty postings, got %v", expanded)
}
// BenchmarkMemPostings_Delete is quite heavy, so consider running it with
// -benchtime=10x or similar to get more stable and comparable results.
func BenchmarkMemPostings_Delete(b *testing.B) {
internedItoa := map[int]string{}
var mtx sync.RWMutex
itoa := func(i int) string {
mtx.RLock()
s, ok := internedItoa[i]
mtx.RUnlock()
if ok {
return s
}
mtx.Lock()
s = strconv.Itoa(i)
internedItoa[i] = s
mtx.Unlock()
return s
}
const total = 1e6
allSeries := [total]labels.Labels{}
nameValues := make([]string, 0, 100)
for i := range int(total) {
nameValues = nameValues[:0]
// A thousand labels like lbl_x_of_1000, each with total/1000 values
thousand := "lbl_" + itoa(i%1000) + "_of_1000"
nameValues = append(nameValues, thousand, itoa(i/1000))
// A hundred labels like lbl_x_of_100, each with total/100 values.
hundred := "lbl_" + itoa(i%100) + "_of_100"
nameValues = append(nameValues, hundred, itoa(i/100))
if i < 100 {
ten := "lbl_" + itoa(i%10) + "_of_10"
nameValues = append(nameValues, ten, itoa(i%10))
}
allSeries[i] = labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...)
}
for _, refs := range []int{1, 100, 10_000} {
b.Run(fmt.Sprintf("refs=%d", refs), func(b *testing.B) {
for _, reads := range []int{0, 1, 10} {
b.Run(fmt.Sprintf("readers=%d", reads), func(b *testing.B) {
if b.N > total/refs {
// Just to make sure that the benchmark still makes sense.
panic("benchmark not prepared")
}
p := NewMemPostings()
for i := range allSeries {
p.Add(storage.SeriesRef(i), allSeries[i])
}
stop := make(chan struct{})
wg := sync.WaitGroup{}
for i := range reads {
wg.Add(1)
go func(i int) {
lbl := "lbl_" + itoa(i) + "_of_100"
defer wg.Done()
for {
select {
case <-stop:
return
default:
// Get a random value of this label.
p.Postings(context.Background(), lbl, itoa(rand.Intn(10000))).Next()
}
}
}(i)
}
b.Cleanup(func() {
close(stop)
wg.Wait()
})
b.ResetTimer()
for n := 0; b.Loop(); n++ {
deleted := make(map[storage.SeriesRef]struct{}, refs)
affected := make(map[labels.Label]struct{}, refs)
for i := range refs {
ref := storage.SeriesRef(n*refs + i)
deleted[ref] = struct{}{}
allSeries[ref].Range(func(l labels.Label) {
affected[l] = struct{}{}
})
}
p.Delete(deleted, affected)
}
})
}
})
}
}
func TestFindIntersectingPostings(t *testing.T) {
t.Run("multiple intersections", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 15, 20, 25, 30, 35, 40, 45, 50})
candidates := []Postings{
0: NewListPostings([]storage.SeriesRef{7, 13, 14, 27}), // Does not intersect.
1: NewListPostings([]storage.SeriesRef{10, 20}), // Does intersect.
2: NewListPostings([]storage.SeriesRef{29, 30, 31}), // Does intersect.
3: NewListPostings([]storage.SeriesRef{29, 30, 31}), // Does intersect (same again).
4: NewListPostings([]storage.SeriesRef{60}), // Does not intersect.
5: NewListPostings([]storage.SeriesRef{45}), // Does intersect.
6: EmptyPostings(), // Does not intersect.
}
indexes, err := FindIntersectingPostings(p, candidates)
require.NoError(t, err)
sort.Ints(indexes)
require.Equal(t, []int{1, 2, 3, 5}, indexes)
})
t.Run("no intersections", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 15, 20, 25, 30, 35, 40, 45, 50})
candidates := []Postings{
0: NewListPostings([]storage.SeriesRef{7, 13, 14, 27}), // Does not intersect.
1: NewListPostings([]storage.SeriesRef{60}), // Does not intersect.
2: EmptyPostings(), // Does not intersect.
}
indexes, err := FindIntersectingPostings(p, candidates)
require.NoError(t, err)
require.Empty(t, indexes)
})
t.Run("p is ErrPostings", func(t *testing.T) {
p := ErrPostings(context.Canceled)
candidates := []Postings{NewListPostings([]storage.SeriesRef{1})}
_, err := FindIntersectingPostings(p, candidates)
require.Error(t, err)
})
t.Run("one of the candidates is ErrPostings", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{1})
candidates := []Postings{
NewListPostings([]storage.SeriesRef{1}),
ErrPostings(context.Canceled),
}
_, err := FindIntersectingPostings(p, candidates)
require.Error(t, err)
})
t.Run("one of the candidates fails on nth call", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50, 60, 70})
candidates := []Postings{
NewListPostings([]storage.SeriesRef{7, 13, 14, 27}),
&postingsFailingAfterNthCall{2, NewListPostings([]storage.SeriesRef{29, 30, 31, 40})},
}
_, err := FindIntersectingPostings(p, candidates)
require.Error(t, err)
})
t.Run("p fails on the nth call", func(t *testing.T) {
p := &postingsFailingAfterNthCall{2, NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50, 60, 70})}
candidates := []Postings{
NewListPostings([]storage.SeriesRef{7, 13, 14, 27}),
NewListPostings([]storage.SeriesRef{29, 30, 31, 40}),
}
_, err := FindIntersectingPostings(p, candidates)
require.Error(t, err)
})
}
type postingsFailingAfterNthCall struct {
ttl int
Postings
}
func (p *postingsFailingAfterNthCall) Seek(v storage.SeriesRef) bool {
p.ttl--
if p.ttl <= 0 {
return false
}
return p.Postings.Seek(v)
}
func (p *postingsFailingAfterNthCall) Next() bool {
p.ttl--
if p.ttl <= 0 {
return false
}
return p.Postings.Next()
}
func (p *postingsFailingAfterNthCall) Err() error {
if p.ttl <= 0 {
return errors.New("ttl exceeded")
}
return p.Postings.Err()
}
func TestPostingsWithIndexHeap(t *testing.T) {
t.Run("iterate", func(t *testing.T) {
h := postingsWithIndexHeap{
{index: 0, p: NewListPostings([]storage.SeriesRef{10, 20, 30})},
{index: 1, p: NewListPostings([]storage.SeriesRef{1, 5})},
{index: 2, p: NewListPostings([]storage.SeriesRef{25, 50})},
}
for _, node := range h {
node.p.Next()
}
heap.Init(&h)
for _, expected := range []storage.SeriesRef{1, 5, 10, 20, 25, 30, 50} {
require.Equal(t, expected, h.at())
require.NoError(t, h.next())
}
require.True(t, h.empty())
})
t.Run("pop", func(t *testing.T) {
h := postingsWithIndexHeap{
{index: 0, p: NewListPostings([]storage.SeriesRef{10, 20, 30})},
{index: 1, p: NewListPostings([]storage.SeriesRef{1, 5})},
{index: 2, p: NewListPostings([]storage.SeriesRef{25, 50})},
}
for _, node := range h {
node.p.Next()
}
heap.Init(&h)
for _, expected := range []storage.SeriesRef{1, 5, 10, 20} {
require.Equal(t, expected, h.at())
require.NoError(t, h.next())
}
require.Equal(t, storage.SeriesRef(25), h.at())
node := heap.Pop(&h).(postingsWithIndex)
require.Equal(t, 2, node.index)
require.Equal(t, storage.SeriesRef(25), node.p.At())
})
}
func TestListPostings(t *testing.T) {
t.Run("empty list", func(t *testing.T) {
p := NewListPostings(nil)
require.Equal(t, 0, p.(*listPostings).Len())
require.False(t, p.Next())
require.False(t, p.Seek(10))
require.False(t, p.Next())
require.NoError(t, p.Err())
require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("one posting", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek less", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek equal", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek more", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.Equal(t, 1, p.(*listPostings).Len())
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek after next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Next())
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
require.Equal(t, 0, p.(*listPostings).Len())
})
})
t.Run("multiple postings", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
require.Equal(t, 2, p.(*listPostings).Len())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Next())
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/index/index_test.go | tsdb/index/index_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"context"
"errors"
"fmt"
"hash/crc32"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
type series struct {
l labels.Labels
chunks []chunks.Meta
}
type mockIndex struct {
series map[storage.SeriesRef]series
postings map[labels.Label][]storage.SeriesRef
symbols map[string]struct{}
}
func newMockIndex() mockIndex {
ix := mockIndex{
series: make(map[storage.SeriesRef]series),
postings: make(map[labels.Label][]storage.SeriesRef),
symbols: make(map[string]struct{}),
}
ix.postings[allPostingsKey] = []storage.SeriesRef{}
return ix
}
func (m mockIndex) Symbols() (map[string]struct{}, error) {
return m.symbols, nil
}
func (m mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error {
if _, ok := m.series[ref]; ok {
return fmt.Errorf("series with reference %d already added", ref)
}
l.Range(func(lbl labels.Label) {
m.symbols[lbl.Name] = struct{}{}
m.symbols[lbl.Value] = struct{}{}
if _, ok := m.postings[lbl]; !ok {
m.postings[lbl] = []storage.SeriesRef{}
}
m.postings[lbl] = append(m.postings[lbl], ref)
})
m.postings[allPostingsKey] = append(m.postings[allPostingsKey], ref)
s := series{l: l}
// Actual chunk data is not stored in the index.
for _, c := range chunks {
c.Chunk = nil
s.chunks = append(s.chunks, c)
}
m.series[ref] = s
return nil
}
func (mockIndex) Close() error {
return nil
}
func (m mockIndex) LabelValues(_ context.Context, name string) ([]string, error) {
values := []string{}
for l := range m.postings {
if l.Name == name {
values = append(values, l.Value)
}
}
return values, nil
}
func (m mockIndex) Postings(ctx context.Context, name string, values ...string) (Postings, error) {
p := []Postings{}
for _, value := range values {
l := labels.Label{Name: name, Value: value}
p = append(p, m.SortedPostings(NewListPostings(m.postings[l])))
}
return Merge(ctx, p...), nil
}
func (m mockIndex) SortedPostings(p Postings) Postings {
ep, err := ExpandPostings(p)
if err != nil {
return ErrPostings(fmt.Errorf("expand postings: %w", err))
}
sort.Slice(ep, func(i, j int) bool {
return labels.Compare(m.series[ep[i]].l, m.series[ep[j]].l) < 0
})
return NewListPostings(ep)
}
func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s, ok := m.series[ref]
if !ok {
return errors.New("not found")
}
builder.Assign(s.l)
*chks = append((*chks)[:0], s.chunks...)
return nil
}
func TestIndexRW_Create_Open(t *testing.T) {
dir := t.TempDir()
fn := filepath.Join(dir, indexFilename)
// An empty index must still result in a readable file.
iw, err := NewWriter(context.Background(), fn)
require.NoError(t, err)
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn, DecodePostingsRaw)
require.NoError(t, err)
require.NoError(t, ir.Close())
// Modifying the magic header must cause open to fail.
f, err := os.OpenFile(fn, os.O_WRONLY, 0o666)
require.NoError(t, err)
_, err = f.WriteAt([]byte{0, 0}, 0)
require.NoError(t, err)
f.Close()
_, err = NewFileReader(fn, DecodePostingsRaw)
require.Error(t, err)
}
func TestIndexRW_Postings(t *testing.T) {
ctx := context.Background()
var input indexWriterSeriesSlice
for i := 1; i < 5; i++ {
input = append(input, &indexWriterSeries{
labels: labels.FromStrings("a", "1", "b", strconv.Itoa(i)),
})
}
ir, fn, _ := createFileReader(ctx, t, input)
p, err := ir.Postings(ctx, "a", "1")
require.NoError(t, err)
var c []chunks.Meta
var builder labels.ScratchBuilder
for i := 0; p.Next(); i++ {
err := ir.Series(p.At(), &builder, &c)
require.NoError(t, err)
require.Empty(t, c)
testutil.RequireEqual(t, input[i].labels, builder.Labels())
}
require.NoError(t, p.Err())
t.Run("ShardedPostings()", func(t *testing.T) {
ir, err := NewFileReader(fn, DecodePostingsRaw)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, ir.Close())
})
// List all postings for a given label value. This is what we expect to get
// in output from all shards.
p, err = ir.Postings(ctx, "a", "1")
require.NoError(t, err)
var expected []storage.SeriesRef
for p.Next() {
expected = append(expected, p.At())
}
require.NoError(t, p.Err())
require.NotEmpty(t, expected)
// Query the same postings for each shard.
const shardCount = uint64(4)
actualShards := make(map[uint64][]storage.SeriesRef)
actualPostings := make([]storage.SeriesRef, 0, len(expected))
for shardIndex := range shardCount {
p, err = ir.Postings(ctx, "a", "1")
require.NoError(t, err)
p = ir.ShardedPostings(p, shardIndex, shardCount)
for p.Next() {
ref := p.At()
actualShards[shardIndex] = append(actualShards[shardIndex], ref)
actualPostings = append(actualPostings, ref)
}
require.NoError(t, p.Err())
}
// We expect the postings merged from all shards to be exactly the same as the non-sharded ones.
require.ElementsMatch(t, expected, actualPostings)
// We expect each shard to contain only series whose hash maps to that shard.
for shardIndex, ids := range actualShards {
for _, id := range ids {
var lbls labels.ScratchBuilder
require.NoError(t, ir.Series(id, &lbls, nil))
require.Equal(t, shardIndex, labels.StableHash(lbls.Labels())%shardCount)
}
}
})
}
func TestPostingsMany(t *testing.T) {
ctx := context.Background()
// Create a label in the index which has 999 values.
var input indexWriterSeriesSlice
for i := 1; i < 1000; i++ {
v := fmt.Sprintf("%03d", i)
input = append(input, &indexWriterSeries{
labels: labels.FromStrings("i", v, "foo", "bar"),
})
}
ir, _, symbols := createFileReader(ctx, t, input)
cases := []struct {
in []string
}{
// Simple cases, everything is present.
{in: []string{"002"}},
{in: []string{"031", "032", "033"}},
{in: []string{"032", "033"}},
{in: []string{"127", "128"}},
{in: []string{"127", "128", "129"}},
{in: []string{"127", "129"}},
{in: []string{"128", "129"}},
{in: []string{"998", "999"}},
{in: []string{"999"}},
// Before actual values.
{in: []string{"000"}},
{in: []string{"000", "001"}},
{in: []string{"000", "002"}},
// After actual values.
{in: []string{"999a"}},
{in: []string{"999", "999a"}},
{in: []string{"998", "999", "999a"}},
// In the middle of actual values.
{in: []string{"126a", "127", "128"}},
{in: []string{"127", "127a", "128"}},
{in: []string{"127", "127a", "128", "128a", "129"}},
{in: []string{"127", "128a", "129"}},
{in: []string{"128", "128a", "129"}},
{in: []string{"128", "129", "129a"}},
{in: []string{"126a", "126b", "127", "127a", "127b", "128", "128a", "128b", "129", "129a", "129b"}},
}
var builder labels.ScratchBuilder
for _, c := range cases {
it, err := ir.Postings(ctx, "i", c.in...)
require.NoError(t, err)
got := []string{}
var metas []chunks.Meta
for it.Next() {
require.NoError(t, ir.Series(it.At(), &builder, &metas))
got = append(got, builder.Labels().Get("i"))
}
require.NoError(t, it.Err())
exp := []string{}
for _, e := range c.in {
if _, ok := symbols[e]; ok && e != "l" {
exp = append(exp, e)
}
}
require.Equalf(t, exp, got, "input: %v", c.in)
}
}
func TestPersistence_index_e2e(t *testing.T) {
ctx := context.Background()
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
require.NoError(t, err)
// Sort labels as the index writer expects series in sorted order.
sort.Sort(labels.Slice(lbls))
var input indexWriterSeriesSlice
ref := uint64(0)
// Generate ChunkMetas for every label set.
for i, lset := range lbls {
var metas []chunks.Meta
for j := 0; j <= (i % 20); j++ {
ref++
metas = append(metas, chunks.Meta{
MinTime: int64(j * 10000),
MaxTime: int64((j+1)*10000) - 1,
Ref: chunks.ChunkRef(ref),
Chunk: chunkenc.NewXORChunk(),
})
}
input = append(input, &indexWriterSeries{
labels: lset,
chunks: metas,
})
}
ir, _, _ := createFileReader(ctx, t, input)
// Population procedure as done by compaction.
var (
postings = NewMemPostings()
values = map[string]map[string]struct{}{}
)
mi := newMockIndex()
for i, s := range input {
require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))
s.labels.Range(func(l labels.Label) {
valset, ok := values[l.Name]
if !ok {
valset = map[string]struct{}{}
values[l.Name] = valset
}
valset[l.Value] = struct{}{}
})
postings.Add(storage.SeriesRef(i), s.labels)
}
for p := range mi.postings {
gotp, err := ir.Postings(ctx, p.Name, p.Value)
require.NoError(t, err)
expp, err := mi.Postings(ctx, p.Name, p.Value)
require.NoError(t, err)
var chks, expchks []chunks.Meta
var builder, eBuilder labels.ScratchBuilder
for gotp.Next() {
require.True(t, expp.Next())
ref := gotp.At()
err := ir.Series(ref, &builder, &chks)
require.NoError(t, err)
err = mi.Series(expp.At(), &eBuilder, &expchks)
require.NoError(t, err)
testutil.RequireEqual(t, eBuilder.Labels(), builder.Labels())
require.Equal(t, expchks, chks)
}
require.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value)
require.NoError(t, gotp.Err())
}
labelPairs := map[string][]string{}
for l := range mi.postings {
labelPairs[l.Name] = append(labelPairs[l.Name], l.Value)
}
for k, v := range labelPairs {
sort.Strings(v)
res, err := ir.SortedLabelValues(ctx, k, nil)
require.NoError(t, err)
require.Len(t, res, len(v))
for i := range v {
require.Equal(t, v[i], res[i])
}
}
gotSymbols := []string{}
it := ir.Symbols()
for it.Next() {
gotSymbols = append(gotSymbols, it.At())
}
require.NoError(t, it.Err())
expSymbols := []string{}
for s := range mi.symbols {
expSymbols = append(expSymbols, s)
}
sort.Strings(expSymbols)
require.Equal(t, expSymbols, gotSymbols)
}
func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) {
w, err := NewWriter(context.Background(), filepath.Join(t.TempDir(), "index"))
require.NoError(t, err)
require.NoError(t, w.AddSymbol("__name__"))
require.NoError(t, w.AddSymbol("metric_1"))
require.NoError(t, w.AddSymbol("metric_2"))
require.NoError(t, w.AddSeries(0, labels.FromStrings("__name__", "metric_1", "__name__", "metric_2")))
err = w.Close()
require.Error(t, err)
require.ErrorContains(t, err, "corruption detected when writing postings to index")
}
func TestDecbufUvarintWithInvalidBuffer(t *testing.T) {
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
db := encoding.NewDecbufUvarintAt(b, 0, castagnoliTable)
require.Error(t, db.Err())
}
func TestReaderWithInvalidBuffer(t *testing.T) {
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
_, err := NewReader(b, DecodePostingsRaw)
require.Error(t, err)
}
// TestNewFileReaderErrorNoOpenFiles ensures that in case of an error no file remains open.
func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
dir := testutil.NewTemporaryDirectory("block", t)
idxName := filepath.Join(dir.Path(), "index")
err := os.WriteFile(idxName, []byte("corrupted contents"), 0o666)
require.NoError(t, err)
_, err = NewFileReader(idxName, DecodePostingsRaw)
require.Error(t, err)
// dir.Close will fail on Windows if the idxName fd is not closed on the error path.
dir.Close()
}
func TestSymbols(t *testing.T) {
buf := encoding.Encbuf{}
// Add a prefix to the buffer to simulate symbols being part of a larger buffer.
buf.PutUvarintStr("something")
symbolsStart := buf.Len()
buf.PutBE32int(204) // Length of symbols table.
buf.PutBE32int(100) // Number of symbols.
for i := range 100 {
// i represents an index in the Unicode character table.
buf.PutUvarintStr(string(rune(i))) // Symbol.
}
checksum := crc32.Checksum(buf.Get()[symbolsStart+4:], castagnoliTable)
buf.PutBE32(checksum) // Checksum at the end.
s, err := NewSymbols(realByteSlice(buf.Get()), FormatV2, symbolsStart)
require.NoError(t, err)
// We store only 4 offsets to symbols.
require.Equal(t, 32, s.Size())
for i := 99; i >= 0; i-- {
s, err := s.Lookup(uint32(i))
require.NoError(t, err)
require.Equal(t, string(rune(i)), s)
}
_, err = s.Lookup(100)
require.Error(t, err)
for i := 99; i >= 0; i-- {
r, err := s.ReverseLookup(string(rune(i)))
require.NoError(t, err)
require.Equal(t, uint32(i), r)
}
_, err = s.ReverseLookup(string(rune(100)))
require.Error(t, err)
iter := s.Iter()
i := 0
for iter.Next() {
require.Equal(t, string(rune(i)), iter.At())
i++
}
require.NoError(t, iter.Err())
}
func BenchmarkReader_ShardedPostings(b *testing.B) {
const (
numSeries = 10000
numShards = 16
)
ctx := context.Background()
var input indexWriterSeriesSlice
for i := 1; i <= numSeries; i++ {
input = append(input, &indexWriterSeries{
labels: labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i)),
})
}
ir, _, _ := createFileReader(ctx, b, input)
for n := 0; b.Loop(); n++ {
allPostings, err := ir.Postings(ctx, "const", fmt.Sprintf("%10d", 1))
require.NoError(b, err)
ir.ShardedPostings(allPostings, uint64(n%numShards), numShards)
}
}
func TestDecoder_Postings_WrongInput(t *testing.T) {
d := encoding.Decbuf{B: []byte("the cake is a lie")}
_, _, err := (&Decoder{DecodePostings: DecodePostingsRaw}).DecodePostings(d)
require.Error(t, err)
}
func TestChunksRefOrdering(t *testing.T) {
dir := t.TempDir()
idx, err := NewWriter(context.Background(), filepath.Join(dir, "index"))
require.NoError(t, err)
require.NoError(t, idx.AddSymbol("1"))
require.NoError(t, idx.AddSymbol("2"))
require.NoError(t, idx.AddSymbol("__name__"))
c50 := chunks.Meta{Ref: 50}
c100 := chunks.Meta{Ref: 100}
c200 := chunks.Meta{Ref: 200}
require.NoError(t, idx.AddSeries(1, labels.FromStrings("__name__", "1"), c100))
require.EqualError(t, idx.AddSeries(2, labels.FromStrings("__name__", "2"), c50), "unsorted chunk reference: 50, previous: 100")
require.NoError(t, idx.AddSeries(2, labels.FromStrings("__name__", "2"), c200))
require.NoError(t, idx.Close())
}
func TestChunksTimeOrdering(t *testing.T) {
dir := t.TempDir()
idx, err := NewWriter(context.Background(), filepath.Join(dir, "index"))
require.NoError(t, err)
require.NoError(t, idx.AddSymbol("1"))
require.NoError(t, idx.AddSymbol("2"))
require.NoError(t, idx.AddSymbol("__name__"))
require.NoError(t, idx.AddSeries(1, labels.FromStrings("__name__", "1"),
chunks.Meta{Ref: 1, MinTime: 0, MaxTime: 10}, // Also checks that first chunk can have MinTime: 0.
chunks.Meta{Ref: 2, MinTime: 11, MaxTime: 20},
chunks.Meta{Ref: 3, MinTime: 21, MaxTime: 30},
))
require.EqualError(t, idx.AddSeries(1, labels.FromStrings("__name__", "2"),
chunks.Meta{Ref: 10, MinTime: 0, MaxTime: 10},
chunks.Meta{Ref: 20, MinTime: 10, MaxTime: 20},
), "chunk minT 10 is not higher than previous chunk maxT 10")
require.EqualError(t, idx.AddSeries(1, labels.FromStrings("__name__", "2"),
chunks.Meta{Ref: 10, MinTime: 100, MaxTime: 30},
), "chunk maxT 30 is less than minT 100")
require.NoError(t, idx.Close())
}
func TestReader_PostingsForLabelMatching(t *testing.T) {
const seriesCount = 9
var input indexWriterSeriesSlice
for i := 1; i <= seriesCount; i++ {
input = append(input, &indexWriterSeries{
labels: labels.FromStrings("__name__", strconv.Itoa(i)),
chunks: []chunks.Meta{
{Ref: 1, MinTime: 0, MaxTime: 10},
},
})
}
ir, _, _ := createFileReader(context.Background(), t, input)
p := ir.PostingsForLabelMatching(context.Background(), "__name__", func(v string) bool {
iv, err := strconv.Atoi(v)
if err != nil {
panic(err)
}
return iv%2 == 0
})
require.NoError(t, p.Err())
refs, err := ExpandPostings(p)
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{4, 6, 8, 10}, refs)
}
func TestReader_PostingsForAllLabelValues(t *testing.T) {
const seriesCount = 9
var input indexWriterSeriesSlice
for i := 1; i <= seriesCount; i++ {
input = append(input, &indexWriterSeries{
labels: labels.FromStrings("__name__", strconv.Itoa(i)),
chunks: []chunks.Meta{
{Ref: 1, MinTime: 0, MaxTime: 10},
},
})
}
ir, _, _ := createFileReader(context.Background(), t, input)
p := ir.PostingsForAllLabelValues(context.Background(), "__name__")
require.NoError(t, p.Err())
refs, err := ExpandPostings(p)
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{3, 4, 5, 6, 7, 8, 9, 10, 11}, refs)
}
func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
const seriesCount = 1000
var input indexWriterSeriesSlice
for i := 1; i <= seriesCount; i++ {
input = append(input, &indexWriterSeries{
labels: labels.FromStrings("__name__", fmt.Sprintf("%4d", i)),
chunks: []chunks.Meta{
{Ref: 1, MinTime: 0, MaxTime: 10},
},
})
}
ir, _, _ := createFileReader(context.Background(), t, input)
failAfter := uint64(seriesCount / 2) // Fail after processing half of the series.
ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
p := ir.PostingsForLabelMatching(ctx, "__name__", func(string) bool {
return true
})
require.Error(t, p.Err())
require.Equal(t, failAfter, ctx.Count())
}
func TestReader_LabelNamesForHonorsContextCancel(t *testing.T) {
const seriesCount = 1000
var input indexWriterSeriesSlice
for i := 1; i <= seriesCount; i++ {
input = append(input, &indexWriterSeries{
labels: labels.FromStrings(labels.MetricName, fmt.Sprintf("%4d", i)),
chunks: []chunks.Meta{
{Ref: 1, MinTime: 0, MaxTime: 10},
},
})
}
ir, _, _ := createFileReader(context.Background(), t, input)
name, value := AllPostingsKey()
p, err := ir.Postings(context.Background(), name, value)
require.NoError(t, err)
// We check context cancellation every 128 iterations, so a FailAfter of 3 will fail
// after iterating 3 * 128 series.
failAfter := uint64(3)
ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
_, err = ir.LabelNamesFor(ctx, p)
require.Error(t, err)
require.Equal(t, failAfter, ctx.Count())
}
// createFileReader creates a temporary index file. It writes the provided input to this file.
// It returns a Reader for this file, the file's name, and the symbol map.
func createFileReader(ctx context.Context, tb testing.TB, input indexWriterSeriesSlice) (*Reader, string, map[string]struct{}) {
tb.Helper()
fn := filepath.Join(tb.TempDir(), indexFilename)
iw, err := NewWriter(ctx, fn)
require.NoError(tb, err)
symbols := map[string]struct{}{}
for _, s := range input {
s.labels.Range(func(l labels.Label) {
symbols[l.Name] = struct{}{}
symbols[l.Value] = struct{}{}
})
}
syms := []string{}
for s := range symbols {
syms = append(syms, s)
}
slices.Sort(syms)
for _, s := range syms {
require.NoError(tb, iw.AddSymbol(s))
}
for i, s := range input {
require.NoError(tb, iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))
}
require.NoError(tb, iw.Close())
ir, err := NewFileReader(fn, DecodePostingsRaw)
require.NoError(tb, err)
tb.Cleanup(func() {
require.NoError(tb, ir.Close())
})
return ir, fn, symbols
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/index/postings.go | tsdb/index/postings.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"container/heap"
"context"
"encoding/binary"
"fmt"
"maps"
"math"
"runtime"
"slices"
"sort"
"strings"
"sync"
"time"
"github.com/bboreham/go-loser"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
)
const exponentialSliceGrowthFactor = 2
var allPostingsKey = labels.Label{}
// AllPostingsKey returns the label key that is used to store the postings list of all existing IDs.
func AllPostingsKey() (name, value string) {
return allPostingsKey.Name, allPostingsKey.Value
}
// ensureOrderBatchSize is the max number of postings passed to a worker in a single batch in MemPostings.EnsureOrder().
const ensureOrderBatchSize = 1024
// ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder().
var ensureOrderBatchPool = sync.Pool{
New: func() any {
x := make([][]storage.SeriesRef, 0, ensureOrderBatchSize)
return &x // Return pointer type as preferred by Pool.
},
}
// MemPostings holds a postings list of series IDs per label pair. They may be written
// to out of order.
// EnsureOrder() must be called once before any reads are done. This allows for quick
// unordered batch fills on startup.
type MemPostings struct {
mtx sync.RWMutex
// m holds the postings lists for each label-value pair, indexed first by label name, and then by label value.
//
// mtx must be held when interacting with m (the appropriate one for reading or writing).
// It is safe to retain a reference to a postings list after releasing the lock.
//
// BUG: There's currently a data race in addFor, which might modify the tail of the postings list:
// https://github.com/prometheus/prometheus/issues/15317
m map[string]map[string][]storage.SeriesRef
// lvs holds the label values for each label name.
// lvs[name] is essentially an unsorted append-only list of all keys in m[name]
// mtx must be held when interacting with lvs.
// Since it's append-only, it is safe to read the label values slice after releasing the lock.
lvs map[string][]string
ordered bool
}
const defaultLabelNamesMapSize = 512
// NewMemPostings returns a MemPostings that's ready for reads and writes.
func NewMemPostings() *MemPostings {
return &MemPostings{
m: make(map[string]map[string][]storage.SeriesRef, defaultLabelNamesMapSize),
lvs: make(map[string][]string, defaultLabelNamesMapSize),
ordered: true,
}
}
// NewUnorderedMemPostings returns a MemPostings that is not safe to be read from
// until EnsureOrder() has been called once.
func NewUnorderedMemPostings() *MemPostings {
return &MemPostings{
m: make(map[string]map[string][]storage.SeriesRef, defaultLabelNamesMapSize),
lvs: make(map[string][]string, defaultLabelNamesMapSize),
ordered: false,
}
}
// Symbols returns an iterator over all unique name and value strings, in order.
func (p *MemPostings) Symbols() StringIter {
p.mtx.RLock()
// Make a quick clone of the map to avoid holding the lock while iterating.
// It's safe to use the values of the map after releasing the lock, as they're append-only slices.
lvs := maps.Clone(p.lvs)
p.mtx.RUnlock()
// Add all the strings to a map to de-duplicate.
symbols := make(map[string]struct{}, defaultLabelNamesMapSize)
for n, labelValues := range lvs {
symbols[n] = struct{}{}
for _, v := range labelValues {
symbols[v] = struct{}{}
}
}
res := make([]string, 0, len(symbols))
for k := range symbols {
res = append(res, k)
}
slices.Sort(res)
return NewStringListIter(res)
}
// SortedKeys returns a list of sorted label keys of the postings.
func (p *MemPostings) SortedKeys() []labels.Label {
p.mtx.RLock()
keys := make([]labels.Label, 0, len(p.m))
for n, e := range p.m {
for v := range e {
keys = append(keys, labels.Label{Name: n, Value: v})
}
}
p.mtx.RUnlock()
slices.SortFunc(keys, func(a, b labels.Label) int {
nameCompare := strings.Compare(a.Name, b.Name)
// If names are the same, compare values.
if nameCompare != 0 {
return nameCompare
}
return strings.Compare(a.Value, b.Value)
})
return keys
}
// LabelNames returns all the unique label names.
func (p *MemPostings) LabelNames() []string {
p.mtx.RLock()
defer p.mtx.RUnlock()
n := len(p.m)
if n == 0 {
return nil
}
names := make([]string, 0, n-1)
for name := range p.m {
if name != allPostingsKey.Name {
names = append(names, name)
}
}
return names
}
// LabelValues returns label values for the given name.
func (p *MemPostings) LabelValues(_ context.Context, name string, hints *storage.LabelHints) []string {
p.mtx.RLock()
values := p.lvs[name]
p.mtx.RUnlock()
if hints != nil && hints.Limit > 0 && len(values) > hints.Limit {
values = values[:hints.Limit]
}
// The slice from p.lvs[name] is shared between all readers, and it is append-only.
// Since it's shared, we need to make a copy of it before returning it to make
// sure that no caller modifies the original one by sorting it or filtering it.
// Since it's append-only, we can do this while not holding the mutex anymore.
return slices.Clone(values)
}
// PostingsStats contains cardinality based statistics for postings.
type PostingsStats struct {
CardinalityMetricsStats []Stat
CardinalityLabelStats []Stat
LabelValueStats []Stat
LabelValuePairsStats []Stat
NumLabelPairs int
}
// Stats calculates the cardinality statistics from postings.
// Caller can pass in a function which computes the space required for n series with a given label.
func (p *MemPostings) Stats(label string, limit int, labelSizeFunc func(string, string, uint64) uint64) *PostingsStats {
var size uint64
p.mtx.RLock()
metrics := &maxHeap{}
labels := &maxHeap{}
labelValueLength := &maxHeap{}
labelValuePairs := &maxHeap{}
numLabelPairs := 0
metrics.init(limit)
labels.init(limit)
labelValueLength.init(limit)
labelValuePairs.init(limit)
for n, e := range p.m {
if n == "" {
continue
}
labels.push(Stat{Name: n, Count: uint64(len(e))})
numLabelPairs += len(e)
size = 0
for name, values := range e {
if n == label {
metrics.push(Stat{Name: name, Count: uint64(len(values))})
}
seriesCnt := uint64(len(values))
labelValuePairs.push(Stat{Name: n + "=" + name, Count: seriesCnt})
size += labelSizeFunc(n, name, seriesCnt)
}
labelValueLength.push(Stat{Name: n, Count: size})
}
p.mtx.RUnlock()
return &PostingsStats{
CardinalityMetricsStats: metrics.get(),
CardinalityLabelStats: labels.get(),
LabelValueStats: labelValueLength.get(),
LabelValuePairsStats: labelValuePairs.get(),
NumLabelPairs: numLabelPairs,
}
}
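// exampleStatsUsage is an illustrative sketch added for documentation (not part of the
// upstream API): it shows how a caller might invoke Stats with a simple size function
// that approximates the cost of a label pair as its combined string length times the
// number of series using it.
func exampleStatsUsage(p *MemPostings) *PostingsStats {
	return p.Stats("__name__", 10, func(name, value string, n uint64) uint64 {
		return uint64(len(name)+len(value)) * n
	})
}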
// All returns a postings list over all documents ever added.
func (p *MemPostings) All() Postings {
return p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value)
}
// EnsureOrder ensures that all postings lists are sorted. After it returns, all further
// calls to Add and addFor will insert new IDs in a sorted manner.
// Parameter numberOfConcurrentProcesses is used to specify the maximal number of
// CPU cores used for this operation. If it is <= 0, GOMAXPROCS is used.
// GOMAXPROCS was the default before introducing this parameter.
func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) {
p.mtx.Lock()
defer p.mtx.Unlock()
if p.ordered {
return
}
concurrency := numberOfConcurrentProcesses
if concurrency <= 0 {
concurrency = runtime.GOMAXPROCS(0)
}
workc := make(chan *[][]storage.SeriesRef)
var wg sync.WaitGroup
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go func() {
for job := range workc {
for _, l := range *job {
slices.Sort(l)
}
*job = (*job)[:0]
ensureOrderBatchPool.Put(job)
}
wg.Done()
}()
}
nextJob := ensureOrderBatchPool.Get().(*[][]storage.SeriesRef)
for _, e := range p.m {
for _, l := range e {
*nextJob = append(*nextJob, l)
if len(*nextJob) >= ensureOrderBatchSize {
workc <- nextJob
nextJob = ensureOrderBatchPool.Get().(*[][]storage.SeriesRef)
}
}
}
// If the last job was partially filled, we need to push it to workers too.
if len(*nextJob) > 0 {
workc <- nextJob
}
close(workc)
wg.Wait()
p.ordered = true
}
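// exampleUnorderedFill is an illustrative sketch added for documentation (not part of
// the upstream API) of the intended startup flow: batch-fill an unordered MemPostings
// and call EnsureOrder exactly once before serving any reads.
func exampleUnorderedFill(series map[storage.SeriesRef]labels.Labels) *MemPostings {
	p := NewUnorderedMemPostings()
	for ref, lset := range series {
		p.Add(ref, lset) // Cheap appends; no sorting happens while p.ordered is false.
	}
	p.EnsureOrder(0) // <= 0 means "use GOMAXPROCS workers".
	return p
}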
// Delete removes all ids in the given map from the postings lists.
// affected contains all the labels affected by the deletion; there's no need to check any other labels.
func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) {
p.mtx.Lock()
defer p.mtx.Unlock()
affectedLabelNames := map[string]struct{}{}
process := func(l labels.Label) {
orig := p.m[l.Name][l.Value]
repl := make([]storage.SeriesRef, 0, len(orig))
for _, id := range orig {
if _, ok := deleted[id]; !ok {
repl = append(repl, id)
}
}
if len(repl) > 0 {
p.m[l.Name][l.Value] = repl
} else {
delete(p.m[l.Name], l.Value)
affectedLabelNames[l.Name] = struct{}{}
}
}
i := 0
for l := range affected {
i++
process(l)
// From time to time we want some readers to go through and read their postings.
// It takes around 50ms to process a 1K series batch, and 120ms to process a 10K series batch (local benchmarks on an M3).
// Note that a read query will most likely want to read multiple postings lists, say 5, 10 or 20 (depending on the number of matchers)
// And that read query will most likely evaluate only one of those matchers before we unpause here, so we want to pause often.
if i%512 == 0 {
p.unlockWaitAndLockAgain()
}
}
process(allPostingsKey)
// Now we need to update the label values slices.
i = 0
for name := range affectedLabelNames {
i++
// From time to time we want some readers to go through and read their postings.
if i%512 == 0 {
p.unlockWaitAndLockAgain()
}
if len(p.m[name]) == 0 {
// Delete the label name key if we deleted all values.
delete(p.m, name)
delete(p.lvs, name)
continue
}
// Create the new slice with enough room to grow without reallocating.
// We have deleted values here, so there's definitely some churn; be prepared for it.
lvs := make([]string, 0, exponentialSliceGrowthFactor*len(p.m[name]))
for v := range p.m[name] {
lvs = append(lvs, v)
}
p.lvs[name] = lvs
}
}
// unlockWaitAndLockAgain will unlock an already locked p.mtx.Lock() and then wait a little bit before locking it again,
// letting the RLock()-waiting goroutines to get the lock.
func (p *MemPostings) unlockWaitAndLockAgain() {
p.mtx.Unlock()
// While it's tempting to just do a `time.Sleep(time.Millisecond)` here,
// it wouldn't ensure that readers actually were able to get the read lock,
// because if there are writers waiting on the same mutex, readers won't be able to get it.
// So we just grab one RLock ourselves.
p.mtx.RLock()
// We shouldn't wait here, because we would be blocking a potential write for no reason.
// Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock.
p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section.
// Now we can wait a little bit just to increase the chance of a reader getting the lock.
time.Sleep(time.Millisecond)
p.mtx.Lock()
}
// Iter calls f for each postings list. It aborts if f returns an error and returns it.
func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
p.mtx.RLock()
defer p.mtx.RUnlock()
for n, e := range p.m {
for v, p := range e {
if err := f(labels.Label{Name: n, Value: v}, NewListPostings(p)); err != nil {
return err
}
}
}
return nil
}
// Add a label set to the postings index.
func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) {
p.mtx.Lock()
lset.Range(func(l labels.Label) {
p.addFor(id, l)
})
p.addFor(id, allPostingsKey)
p.mtx.Unlock()
}
func appendWithExponentialGrowth[T any](a []T, v T) []T {
if cap(a) < len(a)+1 {
newList := make([]T, len(a), len(a)*exponentialSliceGrowthFactor+1)
copy(newList, a)
a = newList
}
return append(a, v)
}
func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) {
nm, ok := p.m[l.Name]
if !ok {
nm = map[string][]storage.SeriesRef{}
p.m[l.Name] = nm
}
vm, ok := nm[l.Value]
if !ok {
p.lvs[l.Name] = appendWithExponentialGrowth(p.lvs[l.Name], l.Value)
}
list := appendWithExponentialGrowth(vm, id)
nm[l.Value] = list
if !p.ordered {
return
}
// There is no guarantee that no higher ID was inserted before as they may
// be generated independently before adding them to postings.
// We repair order violations on insert. The invariant is that the first n-1
// items in the list are already sorted.
for i := len(list) - 1; i >= 1; i-- {
if list[i] >= list[i-1] {
break
}
list[i], list[i-1] = list[i-1], list[i]
}
}
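// exampleOutOfOrderAdd is an illustrative sketch added for documentation (not part of
// the upstream API): an ordered MemPostings repairs order violations on insert, so
// adding series 5 before series 3 still yields a sorted postings list.
func exampleOutOfOrderAdd() ([]storage.SeriesRef, error) {
	p := NewMemPostings()
	p.Add(5, labels.FromStrings("job", "api")) // "job" and "api" are made-up example values.
	p.Add(3, labels.FromStrings("job", "api"))
	// Expands to [3, 5] even though 5 was added first.
	return ExpandPostings(p.Postings(context.Background(), "job", "api"))
}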
func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings {
// We'll take the label values slice and then match over that,
// this way we don't need to hold the mutex while we're matching,
// which can be slow (seconds) if the match function is a huge regex.
// Holding this lock prevents new series from being added (slows down the write path)
// and blocks the compaction process.
//
// We just need to make sure we don't modify the slice we took,
// so we'll append matching values to a different one.
p.mtx.RLock()
readOnlyLabelValues := p.lvs[name]
p.mtx.RUnlock()
vals := make([]string, 0, len(readOnlyLabelValues))
for i, v := range readOnlyLabelValues {
if i%checkContextEveryNIterations == 0 && ctx.Err() != nil {
return ErrPostings(ctx.Err())
}
if match(v) {
vals = append(vals, v)
}
}
// If none matched (or this label had no values), no need to grab the lock again.
if len(vals) == 0 {
return EmptyPostings()
}
// Now `vals` only contains the values that matched, get their postings.
its := make([]*listPostings, 0, len(vals))
lps := make([]listPostings, len(vals))
p.mtx.RLock()
e := p.m[name]
for i, v := range vals {
if refs, ok := e[v]; ok {
// Some of the values may have been garbage-collected in the meantime; this is fine, we'll just skip them.
// If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere
// because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels,
// because the series were deleted already.
lps[i] = listPostings{list: refs}
its = append(its, &lps[i])
}
}
// Let the mutex go before merging.
p.mtx.RUnlock()
return Merge(ctx, its...)
}
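// examplePostingsForLabelMatching is an illustrative sketch added for documentation
// (not part of the upstream API): collect postings for every value of a label that
// matches a predicate; the potentially expensive matcher runs without holding the lock.
func examplePostingsForLabelMatching(p *MemPostings) ([]storage.SeriesRef, error) {
	// "pod" and the "api-" prefix are made-up example values.
	matched := p.PostingsForLabelMatching(context.Background(), "pod", func(v string) bool {
		return strings.HasPrefix(v, "api-")
	})
	return ExpandPostings(matched)
}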
// Postings returns a postings iterator for the given label values.
func (p *MemPostings) Postings(ctx context.Context, name string, values ...string) Postings {
res := make([]*listPostings, 0, len(values))
lps := make([]listPostings, len(values))
p.mtx.RLock()
postingsMapForName := p.m[name]
for i, value := range values {
if lp := postingsMapForName[value]; lp != nil {
lps[i] = listPostings{list: lp}
res = append(res, &lps[i])
}
}
p.mtx.RUnlock()
return Merge(ctx, res...)
}
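// examplePostingsForValues is an illustrative sketch added for documentation (not part
// of the upstream API): querying several values of one label name returns the sorted,
// merged union of their postings lists.
func examplePostingsForValues(p *MemPostings) ([]storage.SeriesRef, error) {
	// "env", "prod" and "staging" are made-up example values.
	return ExpandPostings(p.Postings(context.Background(), "env", "prod", "staging"))
}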
func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string) Postings {
p.mtx.RLock()
e := p.m[name]
its := make([]*listPostings, 0, len(e))
lps := make([]listPostings, len(e))
i := 0
for _, refs := range e {
if len(refs) > 0 {
lps[i] = listPostings{list: refs}
its = append(its, &lps[i])
}
i++
}
// Let the mutex go before merging.
p.mtx.RUnlock()
return Merge(ctx, its...)
}
// ExpandPostings returns the postings expanded as a slice.
func ExpandPostings(p Postings) (res []storage.SeriesRef, err error) {
for p.Next() {
res = append(res, p.At())
}
return res, p.Err()
}
// Postings provides iterative access over an ordered list of SeriesRef.
type Postings interface {
// Next advances the iterator and returns true if another value was found.
Next() bool
// Seek advances the iterator to value v or greater and returns
// true if a value was found.
Seek(v storage.SeriesRef) bool
// At returns the value at the current iterator position.
// At should only be called after a successful call to Next or Seek.
At() storage.SeriesRef
// Err returns the last error of the iterator.
Err() error
}
// errPostings is an empty iterator that always errors.
type errPostings struct {
err error
}
func (errPostings) Next() bool { return false }
func (errPostings) Seek(storage.SeriesRef) bool { return false }
func (errPostings) At() storage.SeriesRef { return 0 }
func (e errPostings) Err() error { return e.err }
var emptyPostings = errPostings{}
// EmptyPostings returns a postings list that's always empty.
// NOTE: Returning the EmptyPostings sentinel when a Postings struct has no postings is recommended.
// It triggers optimized flow in other functions like Intersect, Without etc.
func EmptyPostings() Postings {
return emptyPostings
}
// IsEmptyPostingsType returns true if the postings are an empty postings list.
// When this function returns false, it doesn't mean that the postings list isn't empty
// (it could be an empty intersection of two non-empty postings, for example).
func IsEmptyPostingsType(p Postings) bool {
return p == emptyPostings
}
// ErrPostings returns new postings that immediately error.
func ErrPostings(err error) Postings {
return errPostings{err}
}
// Intersect returns a new postings list over the intersection of the
// input postings.
func Intersect(its ...Postings) Postings {
if len(its) == 0 {
return EmptyPostings()
}
if len(its) == 1 {
return its[0]
}
if slices.Contains(its, EmptyPostings()) {
return EmptyPostings()
}
return newIntersectPostings(its...)
}
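// exampleIntersect is an illustrative sketch added for documentation (not part of the
// upstream API): intersecting two sorted lists keeps only the refs present in both.
func exampleIntersect() ([]storage.SeriesRef, error) {
	a := NewListPostings([]storage.SeriesRef{1, 3, 5, 7})
	b := NewListPostings([]storage.SeriesRef{3, 4, 5, 6})
	// Yields [3, 5]. Passing EmptyPostings() as any input would short-circuit to empty.
	return ExpandPostings(Intersect(a, b))
}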
type intersectPostings struct {
postings []Postings // These are the postings we will be intersecting.
current storage.SeriesRef // The current intersection, if Seek() or Next() has returned true.
}
func newIntersectPostings(its ...Postings) *intersectPostings {
return &intersectPostings{postings: its}
}
func (it *intersectPostings) At() storage.SeriesRef {
return it.current
}
func (it *intersectPostings) Seek(target storage.SeriesRef) bool {
for {
allEqual := true
for _, p := range it.postings {
if !p.Seek(target) {
return false
}
if p.At() > target {
target = p.At()
allEqual = false
}
}
// If all p.At() values are equal, we found an intersection.
if allEqual {
it.current = target
return true
}
}
}
func (it *intersectPostings) Next() bool {
// Move forward the first Postings and take its value as the target to match.
if !it.postings[0].Next() {
return false
}
target := it.postings[0].At()
allEqual := true
for _, p := range it.postings[1:] { // Now move forward all the other ones and check if they match.
if !p.Next() {
return false
}
at := p.At()
if at > target { // This one is past the target, so pick up a new target to Seek at the end.
target = at
allEqual = false
} else if at < target { // This one needs to Seek to the target, but carry on with other postings in case they have an even higher target.
allEqual = false
}
}
if allEqual {
it.current = target
return true
}
return it.Seek(target)
}
func (it *intersectPostings) Err() error {
for _, p := range it.postings {
if p.Err() != nil {
return p.Err()
}
}
return nil
}
// Merge returns a new iterator over the union of the input iterators.
func Merge[T Postings](_ context.Context, its ...T) Postings {
if len(its) == 0 {
return EmptyPostings()
}
if len(its) == 1 {
return its[0]
}
p, ok := newMergedPostings(its)
if !ok {
return EmptyPostings()
}
return p
}
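// exampleMerge is an illustrative sketch added for documentation (not part of the
// upstream API): Merge produces the sorted union of its inputs, and duplicate series
// refs are emitted only once (see mergedPostings.Next below).
func exampleMerge() ([]storage.SeriesRef, error) {
	a := NewListPostings([]storage.SeriesRef{1, 3, 5})
	b := NewListPostings([]storage.SeriesRef{2, 3, 4})
	// Yields [1, 2, 3, 4, 5]; the duplicate 3 appears once.
	return ExpandPostings(Merge(context.Background(), a, b))
}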
type mergedPostings[T Postings] struct {
p []T
h *loser.Tree[storage.SeriesRef, T]
cur storage.SeriesRef
}
func newMergedPostings[T Postings](p []T) (m *mergedPostings[T], nonEmpty bool) {
const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree.
lt := loser.New(p, maxVal)
return &mergedPostings[T]{p: p, h: lt}, true
}
func (it *mergedPostings[T]) Next() bool {
for {
if !it.h.Next() {
return false
}
// Remove duplicate entries.
newItem := it.h.At()
if newItem != it.cur {
it.cur = newItem
return true
}
}
}
func (it *mergedPostings[T]) Seek(id storage.SeriesRef) bool {
for !it.h.IsEmpty() && it.h.At() < id {
finished := !it.h.Winner().Seek(id)
it.h.Fix(finished)
}
if it.h.IsEmpty() {
return false
}
it.cur = it.h.At()
return true
}
func (it mergedPostings[T]) At() storage.SeriesRef {
return it.cur
}
func (it mergedPostings[T]) Err() error {
for _, p := range it.p {
if err := p.Err(); err != nil {
return err
}
}
return nil
}
// Without returns a new postings list that contains all elements from the full list that
// are not in the drop list.
func Without(full, drop Postings) Postings {
if full == EmptyPostings() {
return EmptyPostings()
}
if drop == EmptyPostings() {
return full
}
return newRemovedPostings(full, drop)
}
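// exampleWithout is an illustrative sketch added for documentation (not part of the
// upstream API): Without filters the drop list out of the full list.
func exampleWithout() ([]storage.SeriesRef, error) {
	full := NewListPostings([]storage.SeriesRef{1, 2, 3, 4, 5})
	drop := NewListPostings([]storage.SeriesRef{2, 4})
	// Yields [1, 3, 5].
	return ExpandPostings(Without(full, drop))
}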
type removedPostings struct {
full, remove Postings
cur storage.SeriesRef
initialized bool
fok, rok bool
}
func newRemovedPostings(full, remove Postings) *removedPostings {
return &removedPostings{
full: full,
remove: remove,
}
}
func (rp *removedPostings) At() storage.SeriesRef {
return rp.cur
}
func (rp *removedPostings) Next() bool {
if !rp.initialized {
rp.fok = rp.full.Next()
rp.rok = rp.remove.Next()
rp.initialized = true
}
for {
if !rp.fok {
return false
}
if !rp.rok {
rp.cur = rp.full.At()
rp.fok = rp.full.Next()
return true
}
switch fcur, rcur := rp.full.At(), rp.remove.At(); {
case fcur < rcur:
rp.cur = fcur
rp.fok = rp.full.Next()
return true
case rcur < fcur:
// Forward the remove postings to the right position.
rp.rok = rp.remove.Seek(fcur)
default:
// Skip the current posting.
rp.fok = rp.full.Next()
}
}
}
func (rp *removedPostings) Seek(id storage.SeriesRef) bool {
if rp.cur >= id {
return true
}
rp.fok = rp.full.Seek(id)
rp.rok = rp.remove.Seek(id)
rp.initialized = true
return rp.Next()
}
func (rp *removedPostings) Err() error {
if rp.full.Err() != nil {
return rp.full.Err()
}
return rp.remove.Err()
}
// listPostings implements the Postings interface over a plain list.
type listPostings struct {
list []storage.SeriesRef
cur storage.SeriesRef
}
// NewListPostings creates a Postings from the supplied SeriesRefs, which must be in order.
// The list slice passed in is retained.
func NewListPostings(list []storage.SeriesRef) Postings {
return &listPostings{list: list}
}
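// exampleListPostingsSeek is an illustrative sketch added for documentation (not part
// of the upstream API) of the Seek contract: Seek(x) positions the iterator at the
// first ref >= x, and At is only valid after a successful Next or Seek.
func exampleListPostingsSeek() storage.SeriesRef {
	p := NewListPostings([]storage.SeriesRef{10, 20, 30})
	if p.Seek(15) {
		return p.At() // Returns 20, the first ref >= 15.
	}
	return 0
}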
func (it *listPostings) At() storage.SeriesRef {
return it.cur
}
func (it *listPostings) Next() bool {
if len(it.list) > 0 {
it.cur = it.list[0]
it.list = it.list[1:]
return true
}
it.cur = 0
return false
}
func (it *listPostings) Seek(x storage.SeriesRef) bool {
// If the current value satisfies, then return.
if it.cur >= x {
return true
}
if len(it.list) == 0 {
return false
}
i := 0 // Check the next item in the list, otherwise binary search between current position and end.
if it.list[0] < x {
i, _ = slices.BinarySearch(it.list, x)
if i >= len(it.list) { // Off the end - terminate the iterator.
it.list = nil
return false
}
}
it.cur = it.list[i]
it.list = it.list[i+1:]
return true
}
func (*listPostings) Err() error {
return nil
}
// Len returns the remaining number of postings in the list.
func (it *listPostings) Len() int {
return len(it.list)
}
// bigEndianPostings implements the Postings interface over a byte stream of
// big endian numbers.
type bigEndianPostings struct {
list []byte
cur uint32
}
func newBigEndianPostings(list []byte) *bigEndianPostings {
return &bigEndianPostings{list: list}
}
func (it *bigEndianPostings) At() storage.SeriesRef {
return storage.SeriesRef(it.cur)
}
func (it *bigEndianPostings) Next() bool {
if len(it.list) >= 4 {
it.cur = binary.BigEndian.Uint32(it.list)
it.list = it.list[4:]
return true
}
return false
}
func (it *bigEndianPostings) Seek(x storage.SeriesRef) bool {
if storage.SeriesRef(it.cur) >= x {
return true
}
num := len(it.list) / 4
// Do binary search between current position and end.
i := sort.Search(num, func(i int) bool {
return binary.BigEndian.Uint32(it.list[i*4:]) >= uint32(x)
})
if i < num {
j := i * 4
it.cur = binary.BigEndian.Uint32(it.list[j:])
it.list = it.list[j+4:]
return true
}
it.list = nil
return false
}
func (*bigEndianPostings) Err() error {
return nil
}
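// exampleBigEndianPostings is an illustrative sketch added for documentation (not part
// of the upstream API) showing the layout this iterator reads: a flat byte stream of
// 4-byte big-endian series refs.
func exampleBigEndianPostings() ([]storage.SeriesRef, error) {
	buf := make([]byte, 3*4)
	for i, ref := range []uint32{10, 20, 30} {
		binary.BigEndian.PutUint32(buf[i*4:], ref)
	}
	// Iterates 10, 20, 30.
	return ExpandPostings(newBigEndianPostings(buf))
}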
// FindIntersectingPostings checks the intersection of p and candidates[i] for each i in candidates.
// If the intersection is non-empty, then i is added to the returned indexes.
// Returned indexes are not sorted.
func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) {
h := make(postingsWithIndexHeap, 0, len(candidates))
for idx, it := range candidates {
switch {
case it.Next():
h = append(h, postingsWithIndex{index: idx, p: it})
case it.Err() != nil:
return nil, it.Err()
}
}
if h.empty() {
return nil, nil
}
heap.Init(&h)
for !h.empty() {
if !p.Seek(h.at()) {
return indexes, p.Err()
}
if p.At() == h.at() {
indexes = append(indexes, h.popIndex())
} else if err := h.next(); err != nil {
return nil, err
}
}
return indexes, nil
}
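// exampleFindIntersecting is an illustrative sketch added for documentation (not part
// of the upstream API): only the indexes of candidates sharing at least one ref with p
// are returned, in no particular order.
func exampleFindIntersecting() ([]int, error) {
	p := NewListPostings([]storage.SeriesRef{10, 20, 30})
	candidates := []Postings{
		NewListPostings([]storage.SeriesRef{5, 15}),  // No common ref.
		NewListPostings([]storage.SeriesRef{20, 40}), // Shares 20 with p.
	}
	// Returns [1].
	return FindIntersectingPostings(p, candidates)
}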
// postingsWithIndex is used as postingsWithIndexHeap elements by FindIntersectingPostings,
// keeping track of the original index of each postings while they move inside the heap.
type postingsWithIndex struct {
index int
p Postings
// popped means that these postings shouldn't be considered anymore.
// See popIndex() comment to understand why we need this.
popped bool
}
// postingsWithIndexHeap implements heap.Interface,
// with root always pointing to the postings with minimum Postings.At() value.
// It also implements a special way of removing elements that marks them as popped and moves them to the bottom of the
// heap instead of actually removing them, see popIndex() for more details.
type postingsWithIndexHeap []postingsWithIndex
// empty checks whether the heap is empty, which is true if it has no elements, or if the smallest element is popped.
func (h *postingsWithIndexHeap) empty() bool {
return len(*h) == 0 || (*h)[0].popped
}
// popIndex pops the smallest heap element and returns its index.
// In our implementation we don't actually do heap.Pop(); instead we mark the element as `popped` and fix its position, which
// should be after all the non-popped elements according to our sorting strategy.
// By skipping the `heap.Pop()` call we avoid an extra allocation in this heap's Pop() implementation which returns an interface{}.
func (h *postingsWithIndexHeap) popIndex() int {
index := (*h)[0].index
(*h)[0].popped = true
heap.Fix(h, 0)
return index
}
// at returns the storage.SeriesRef that the root Postings is pointing at right now.
func (h postingsWithIndexHeap) at() storage.SeriesRef { return h[0].p.At() }
// next performs the Postings.Next() operation on the root of the heap, performing the related operation on the heap
// and conveniently returning the result of calling Postings.Err() if the result of calling Next() was false.
// If Next() succeeds, heap is fixed to move the root to its new position, according to its Postings.At() value.
// If Next() fails and there's no error reported by Postings.Err(), then the root is marked as popped and the heap is fixed.
func (h *postingsWithIndexHeap) next() error {
pi := (*h)[0]
next := pi.p.Next()
if next {
heap.Fix(h, 0)
return nil
}
if err := pi.p.Err(); err != nil {
return fmt.Errorf("postings %d: %w", pi.index, err)
}
h.popIndex()
return nil
}
// Len implements heap.Interface.
// Notice that Len() > 0 does not imply that heap is not empty as elements are not removed from this heap.
// Use empty() to check whether heap is empty or not.
func (h postingsWithIndexHeap) Len() int { return len(h) }
// Less implements heap.Interface, it puts all the popped elements at the bottom,
// and then sorts by Postings.At() property of each node.
func (h postingsWithIndexHeap) Less(i, j int) bool {
if h[i].popped != h[j].popped {
return h[j].popped
}
return h[i].p.At() < h[j].p.At()
}
// Swap implements heap.Interface.
func (h *postingsWithIndexHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
// Push implements heap.Interface.
func (h *postingsWithIndexHeap) Push(x any) {
*h = append(*h, x.(postingsWithIndex))
}
// Pop implements heap.Interface and pops the last element, which is NOT the min element,
// so this doesn't return the same element that heap.Pop() would.
// Although this method is implemented for correctness, we don't expect it to be used; see popIndex() for details.
func (h *postingsWithIndexHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/index/postingsstats_test.go | tsdb/index/postingsstats_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestPostingsStats(t *testing.T) {
stats := &maxHeap{}
const maxCount = 3000000
const heapLength = 10
stats.init(heapLength)
for i := range maxCount {
item := Stat{
Name: "Label-da",
Count: uint64(i),
}
stats.push(item)
}
stats.push(Stat{Name: "Stuff", Count: 3000000})
data := stats.get()
require.Len(t, data, 10)
for i := range heapLength {
require.Equal(t, uint64(maxCount-i), data[i].Count)
}
}
func TestPostingsStats2(t *testing.T) {
stats := &maxHeap{}
const heapLength = 10
stats.init(heapLength)
stats.push(Stat{Name: "Stuff", Count: 10})
stats.push(Stat{Name: "Stuff", Count: 11})
stats.push(Stat{Name: "Stuff", Count: 1})
stats.push(Stat{Name: "Stuff", Count: 6})
data := stats.get()
require.Len(t, data, 4)
require.Equal(t, uint64(11), data[0].Count)
}
func BenchmarkPostingStatsMaxHeap(b *testing.B) {
stats := &maxHeap{}
const maxCount = 9000000
const heapLength = 10
for b.Loop() {
stats.init(heapLength)
for i := range maxCount {
item := Stat{
Name: "Label-da",
Count: uint64(i),
}
stats.push(item)
}
stats.get()
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/errors/errors_test.go | tsdb/errors/errors_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"context"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestMultiError_Is(t *testing.T) {
customErr1 := errors.New("test error 1")
customErr2 := errors.New("test error 2")
testCases := map[string]struct {
sourceErrors []error
target error
is bool
}{
"adding a context cancellation doesn't lose the information": {
sourceErrors: []error{context.Canceled},
target: context.Canceled,
is: true,
},
"adding multiple context cancellations doesn't lose the information": {
sourceErrors: []error{context.Canceled, context.Canceled},
target: context.Canceled,
is: true,
},
"adding wrapped context cancellations doesn't lose the information": {
sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled)},
target: context.Canceled,
is: true,
},
"adding a nil error doesn't lose the information": {
sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled), nil},
target: context.Canceled,
is: true,
},
"errors with no context cancellation error are not a context canceled error": {
sourceErrors: []error{errors.New("first error"), errors.New("second error")},
target: context.Canceled,
is: false,
},
"no errors are not a context canceled error": {
sourceErrors: nil,
target: context.Canceled,
is: false,
},
"no errors are a nil error": {
sourceErrors: nil,
target: nil,
is: true,
},
"nested multi-error contains customErr1": {
sourceErrors: []error{
customErr1,
NewMulti(
customErr2,
fmt.Errorf("wrapped %w", context.Canceled),
).Err(),
},
target: customErr1,
is: true,
},
"nested multi-error contains customErr2": {
sourceErrors: []error{
customErr1,
NewMulti(
customErr2,
fmt.Errorf("wrapped %w", context.Canceled),
).Err(),
},
target: customErr2,
is: true,
},
"nested multi-error contains wrapped context.Canceled": {
sourceErrors: []error{
customErr1,
NewMulti(
customErr2,
fmt.Errorf("wrapped %w", context.Canceled),
).Err(),
},
target: context.Canceled,
is: true,
},
"nested multi-error does not contain context.DeadlineExceeded": {
sourceErrors: []error{
customErr1,
NewMulti(
customErr2,
fmt.Errorf("wrapped %w", context.Canceled),
).Err(),
},
target: context.DeadlineExceeded,
is: false, // make sure we still return false in valid cases
},
}
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
mErr := NewMulti(testCase.sourceErrors...)
require.Equal(t, testCase.is, errors.Is(mErr.Err(), testCase.target))
})
}
}
func TestMultiError_As(t *testing.T) {
tE1 := testError{"error cause 1"}
tE2 := testError{"error cause 2"}
var target testError
testCases := map[string]struct {
sourceErrors []error
target error
as bool
}{
"MultiError containing only a testError can be cast to that testError": {
sourceErrors: []error{tE1},
target: tE1,
as: true,
},
"MultiError containing multiple testErrors can be cast to the first testError added": {
sourceErrors: []error{tE1, tE2},
target: tE1,
as: true,
},
"MultiError containing multiple errors can be cast to the first testError added": {
sourceErrors: []error{context.Canceled, tE1, context.DeadlineExceeded, tE2},
target: tE1,
as: true,
},
"MultiError not containing a testError cannot be cast to a testError": {
sourceErrors: []error{context.Canceled, context.DeadlineExceeded},
as: false,
},
}
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
mErr := NewMulti(testCase.sourceErrors...).Err()
if testCase.as {
require.ErrorAs(t, mErr, &target)
require.Equal(t, testCase.target, target)
} else {
require.NotErrorAs(t, mErr, &target)
}
})
}
}
type testError struct {
cause string
}
func (e testError) Error() string {
return fmt.Sprintf("testError[cause: %s]", e.cause)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/errors/errors.go | tsdb/errors/errors.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"bytes"
"errors"
"fmt"
"io"
)
// multiError type allows combining multiple errors into one.
type multiError []error
// NewMulti returns a multiError with the provided errors added, skipping any nil ones.
func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return
m := multiError{}
m.Add(errs...)
return m
}
// Add adds one or more errors to the error list. Each error is added only if not nil.
// If an error is of type nonNilMultiError, the errors inside it are flattened into the main multiError.
func (es *multiError) Add(errs ...error) {
for _, err := range errs {
if err == nil {
continue
}
var merr nonNilMultiError
if errors.As(err, &merr) {
*es = append(*es, merr.errs...)
continue
}
*es = append(*es, err)
}
}
// Err returns the error list as an error or nil if it is empty.
func (es multiError) Err() error {
if len(es) == 0 {
return nil
}
return nonNilMultiError{errs: es}
}
// nonNilMultiError implements the error interface, and it represents
// a multiError with at least one error inside it.
// This type is needed to make sure that nil is returned when no errors were combined in the multiError,
// so that the usual err != nil check works.
type nonNilMultiError struct {
errs multiError
}
// Error returns a concatenated string of the contained errors.
func (es nonNilMultiError) Error() string {
var buf bytes.Buffer
if len(es.errs) > 1 {
fmt.Fprintf(&buf, "%d errors: ", len(es.errs))
}
for i, err := range es.errs {
if i != 0 {
buf.WriteString("; ")
}
buf.WriteString(err.Error())
}
return buf.String()
}
// Is attempts to match the provided error against errors in the error list.
//
// This function allows errors.Is to traverse the values stored in the MultiError.
// It returns true if any of the errors in the list match the target.
func (es nonNilMultiError) Is(target error) bool {
for _, err := range es.errs {
if errors.Is(err, target) {
return true
}
}
return false
}
// Unwrap returns the list of errors contained in the multiError.
func (es nonNilMultiError) Unwrap() []error {
return es.errs
}
// CloseAll closes all given closers while recording any errors in a multiError.
func CloseAll(cs []io.Closer) error {
errs := NewMulti()
for _, c := range cs {
errs.Add(c.Close())
}
return errs.Err()
}
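// Editor-added usage sketch (hypothetical names, not used elsewhere in the
// package): accumulate errors from independent steps and inspect the combined
// result with the standard library. The Is and Unwrap methods on
// nonNilMultiError are what let errors.Is see through both the combination
// and any wrapping.
var errExampleSentinel = errors.New("example sentinel")

func exampleCombineErrors() {
	errs := NewMulti()
	errs.Add(nil) // Skipped: nil errors are never recorded.
	errs.Add(fmt.Errorf("step failed: %w", errExampleSentinel))
	errs.Add(errors.New("another failure"))
	combined := errs.Err() // Non-nil, since two errors were recorded.
	fmt.Println(errors.Is(combined, errExampleSentinel)) // true
	fmt.Println(combined)                                // 2 errors: step failed: example sentinel; another failure
}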
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/queue_test.go | tsdb/chunks/queue_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"math/rand"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
)
func (q *writeJobQueue) assertInvariants(t *testing.T) {
q.mtx.Lock()
defer q.mtx.Unlock()
totalSize := 0
for s := q.first; s != nil; s = s.nextSegment {
require.NotNil(t, s.segment)
// Next read index is lower than or equal to the next write index (we cannot read past written jobs).
require.LessOrEqual(t, s.nextRead, s.nextWrite)
// Number of unread elements in this segment.
totalSize += s.nextWrite - s.nextRead
// First segment can be partially read, other segments were not read yet.
if s == q.first {
require.GreaterOrEqual(t, s.nextRead, 0)
} else {
require.Equal(t, 0, s.nextRead)
}
// If the first segment is empty (everything was read from it already), it must have extra capacity for
// additional elements, otherwise it would have been removed.
if s == q.first && s.nextRead == s.nextWrite {
require.Less(t, s.nextWrite, len(s.segment))
}
// Segments in the middle are full.
if s != q.first && s != q.last {
require.Len(t, s.segment, s.nextWrite)
}
// Last segment must have at least one element, or we wouldn't have created it.
require.Positive(t, s.nextWrite)
}
require.Equal(t, q.size, totalSize)
}
func TestQueuePushPopSingleGoroutine(t *testing.T) {
seed := time.Now().UnixNano()
t.Log("seed:", seed)
r := rand.New(rand.NewSource(seed))
const maxSize = 500
const maxIters = 50
for maxCount := 1; maxCount < maxSize; maxCount++ {
queue := newWriteJobQueue(maxCount, 1+(r.Int()%maxCount))
elements := 0 // total elements in the queue
lastWriteID := 0
lastReadID := 0
for range maxIters {
if elements < maxCount {
toWrite := r.Int() % (maxCount - elements)
if toWrite == 0 {
toWrite = 1
}
for i := 0; i < toWrite; i++ {
lastWriteID++
require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(lastWriteID)}))
elements++
}
}
if elements > 0 {
toRead := r.Int() % elements
if toRead == 0 {
toRead = 1
}
for i := 0; i < toRead; i++ {
lastReadID++
j, b := queue.pop()
require.True(t, b)
require.Equal(t, HeadSeriesRef(lastReadID), j.seriesRef)
elements--
}
}
require.Equal(t, elements, queue.length())
queue.assertInvariants(t)
}
}
}
func TestQueuePushBlocksOnFullQueue(t *testing.T) {
queue := newWriteJobQueue(5, 5)
pushTime := make(chan time.Time)
go func() {
require.True(t, queue.push(chunkWriteJob{seriesRef: 1}))
require.True(t, queue.push(chunkWriteJob{seriesRef: 2}))
require.True(t, queue.push(chunkWriteJob{seriesRef: 3}))
require.True(t, queue.push(chunkWriteJob{seriesRef: 4}))
require.True(t, queue.push(chunkWriteJob{seriesRef: 5}))
pushTime <- time.Now()
// This will block
require.True(t, queue.push(chunkWriteJob{seriesRef: 6}))
pushTime <- time.Now()
}()
timeBeforePush := <-pushTime
delay := 100 * time.Millisecond
select {
case <-time.After(delay):
// ok
case <-pushTime:
require.Fail(t, "didn't expect another push to proceed")
}
popTime := time.Now()
j, b := queue.pop()
require.True(t, b)
require.Equal(t, HeadSeriesRef(1), j.seriesRef)
timeAfterPush := <-pushTime
require.GreaterOrEqual(t, timeAfterPush.Sub(popTime), time.Duration(0))
require.GreaterOrEqual(t, timeAfterPush.Sub(timeBeforePush), delay)
}
func TestQueuePopBlocksOnEmptyQueue(t *testing.T) {
queue := newWriteJobQueue(5, 5)
popTime := make(chan time.Time)
go func() {
j, b := queue.pop()
require.True(t, b)
require.Equal(t, HeadSeriesRef(1), j.seriesRef)
popTime <- time.Now()
// This will block
j, b = queue.pop()
require.True(t, b)
require.Equal(t, HeadSeriesRef(2), j.seriesRef)
popTime <- time.Now()
}()
queue.push(chunkWriteJob{seriesRef: 1})
timeBeforePop := <-popTime
delay := 100 * time.Millisecond
select {
case <-time.After(delay):
// ok
case <-popTime:
require.Fail(t, "didn't expect another pop to proceed")
}
pushTime := time.Now()
require.True(t, queue.push(chunkWriteJob{seriesRef: 2}))
timeAfterPop := <-popTime
require.GreaterOrEqual(t, timeAfterPop.Sub(pushTime), time.Duration(0))
require.Greater(t, timeAfterPop.Sub(timeBeforePop), delay)
}
func TestQueuePopUnblocksOnClose(t *testing.T) {
queue := newWriteJobQueue(5, 5)
popTime := make(chan time.Time)
go func() {
j, b := queue.pop()
require.True(t, b)
require.Equal(t, HeadSeriesRef(1), j.seriesRef)
popTime <- time.Now()
// This will block until queue is closed.
j, b = queue.pop()
require.False(t, b)
popTime <- time.Now()
}()
queue.push(chunkWriteJob{seriesRef: 1})
timeBeforePop := <-popTime
delay := 100 * time.Millisecond
select {
case <-time.After(delay):
// ok
case <-popTime:
require.Fail(t, "didn't expect another pop to proceed")
}
closeTime := time.Now()
queue.close()
timeAfterPop := <-popTime
require.GreaterOrEqual(t, timeAfterPop.Sub(closeTime), time.Duration(0))
require.GreaterOrEqual(t, timeAfterPop.Sub(timeBeforePop), delay)
}
func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) {
const count = 10
queue := newWriteJobQueue(count, count)
for i := range count {
require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(i)}))
}
// close the queue before popping all elements.
queue.close()
// No more pushing allowed after close.
require.False(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(11111)}))
// Verify that we can still read all pushed elements.
for i := range count {
j, b := queue.pop()
require.True(t, b)
require.Equal(t, HeadSeriesRef(i), j.seriesRef)
}
_, b := queue.pop()
require.False(t, b)
}
func TestQueuePushPopManyGoroutines(t *testing.T) {
const readGoroutines = 5
const writeGoroutines = 10
const writes = 500
queue := newWriteJobQueue(1024, 64)
// Reading goroutine
refsMx := sync.Mutex{}
refs := map[HeadSeriesRef]bool{}
readersWG := sync.WaitGroup{}
for range readGoroutines {
readersWG.Add(1)
go func() {
defer readersWG.Done()
for j, ok := queue.pop(); ok; j, ok = queue.pop() {
refsMx.Lock()
refs[j.seriesRef] = true
refsMx.Unlock()
}
}()
}
id := atomic.Uint64{}
writersWG := sync.WaitGroup{}
for range writeGoroutines {
writersWG.Add(1)
go func() {
defer writersWG.Done()
for range writes {
ref := id.Inc()
require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(ref)}))
}
}()
}
// Wait until all writes are done.
writersWG.Wait()
// Close the queue and wait for reading to be done.
queue.close()
readersWG.Wait()
// Check if we have all expected values
require.Len(t, refs, writeGoroutines*writes)
}
func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) {
queue := newWriteJobQueue(1024, 64)
require.True(t, queue.push(chunkWriteJob{seriesRef: 1}))
_, b := queue.pop()
require.True(t, b)
require.NotNil(t, queue.first)
require.Equal(t, 1, queue.first.nextRead)
require.Equal(t, 1, queue.first.nextWrite)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/chunks_test.go | tsdb/chunks/chunks_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
func TestReaderWithInvalidBuffer(t *testing.T) {
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
r := &Reader{bs: []ByteSlice{b}}
_, _, err := r.ChunkOrIterable(Meta{Ref: 0})
require.Error(t, err)
}
func TestWriterWithDefaultSegmentSize(t *testing.T) {
chk1, err := ChunkFromSamples([]Sample{
sample{t: 10, f: 11},
sample{t: 20, f: 12},
sample{t: 30, f: 13},
})
require.NoError(t, err)
chk2, err := ChunkFromSamples([]Sample{
sample{t: 40, h: tsdbutil.GenerateTestHistogram(1)},
sample{t: 50, h: tsdbutil.GenerateTestHistogram(2)},
sample{t: 60, h: tsdbutil.GenerateTestHistogram(3)},
})
require.NoError(t, err)
dir := t.TempDir()
w, err := NewWriter(dir)
require.NoError(t, err)
err = w.WriteChunks(chk1, chk2)
require.NoError(t, err)
require.NoError(t, w.Close())
d, err := os.ReadDir(dir)
require.NoError(t, err)
require.Len(t, d, 1, "expected only one segment to be created to hold both chunks")
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/head_chunks.go | tsdb/chunks/head_chunks.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"math"
"os"
"path/filepath"
"slices"
"strconv"
"sync"
"github.com/dennwc/varint"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
// Head chunk file header fields constants.
const (
// MagicHeadChunks is 4 bytes at the beginning of a head chunk file.
MagicHeadChunks = 0x0130BC91
headChunksFormatV1 = 1
)
// ErrChunkDiskMapperClosed is returned by any method to indicate
// that the ChunkDiskMapper was closed.
var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
const (
// MintMaxtSize is the size of the mint/maxt for head chunk file and chunks.
MintMaxtSize = 8
// SeriesRefSize is the size of series reference on disk.
SeriesRefSize = 8
// HeadChunkFileHeaderSize is the total size of the header for a head chunk file.
HeadChunkFileHeaderSize = SegmentHeaderSize
// MaxHeadChunkFileSize is the max size of a head chunk file.
MaxHeadChunkFileSize = 128 * 1024 * 1024 // 128 MiB.
// CRCSize is the size of crc32 sum on disk.
CRCSize = 4
// MaxHeadChunkMetaSize is the max size of an mmapped chunk minus the chunk data.
// Max because the uvarint size can be smaller.
MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunkEncodingSize + MaxChunkLengthFieldSize + CRCSize
// MinWriteBufferSize is the minimum write buffer size allowed.
MinWriteBufferSize = 64 * 1024 // 64KB.
// MaxWriteBufferSize is the maximum write buffer size allowed.
MaxWriteBufferSize = 8 * 1024 * 1024 // 8 MiB.
// DefaultWriteBufferSize is the default write buffer size.
DefaultWriteBufferSize = 4 * 1024 * 1024 // 4 MiB.
// DefaultWriteQueueSize is the default size of the in-memory queue used before flushing chunks to the disk.
// A value of 0 completely disables this feature.
DefaultWriteQueueSize = 0
)
// ChunkDiskMapperRef represents the location of a head chunk on disk.
// The upper 4 bytes hold the index of the head chunk file and
// the lower 4 bytes hold the byte offset in the head chunk file where the chunk starts.
type ChunkDiskMapperRef uint64
func newChunkDiskMapperRef(seq, offset uint64) ChunkDiskMapperRef {
return ChunkDiskMapperRef((seq << 32) | offset)
}
func (ref ChunkDiskMapperRef) Unpack() (seq, offset int) {
seq = int(ref >> 32)
offset = int((ref << 32) >> 32)
return seq, offset
}
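// exampleRefRoundTrip is an editor-added sketch (not used elsewhere): the file
// sequence travels in the upper 4 bytes of the reference and the byte offset
// in the lower 4 bytes, so packing and unpacking is an exact round trip as
// long as both values fit into 32 bits.
func exampleRefRoundTrip() {
	ref := newChunkDiskMapperRef(3, 1024)
	seq, offset := ref.Unpack()
	fmt.Println(seq, offset) // 3 1024
}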
func (ref ChunkDiskMapperRef) GreaterThanOrEqualTo(r ChunkDiskMapperRef) bool {
s1, o1 := ref.Unpack()
s2, o2 := r.Unpack()
return s1 > s2 || (s1 == s2 && o1 >= o2)
}
func (ref ChunkDiskMapperRef) GreaterThan(r ChunkDiskMapperRef) bool {
s1, o1 := ref.Unpack()
s2, o2 := r.Unpack()
return s1 > s2 || (s1 == s2 && o1 > o2)
}
// CorruptionErr is an error that's returned when corruption is encountered.
type CorruptionErr struct {
Dir string
FileIndex int
Err error
}
func (e *CorruptionErr) Error() string {
return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error()
}
func (e *CorruptionErr) Unwrap() error {
return e.Err
}
// chunkPos keeps track of the position in the head chunk files.
// chunkPos is not thread-safe, a lock must be used to protect it.
type chunkPos struct {
seq uint64 // Index of chunk file.
offset uint64 // Offset within chunk file.
cutFile bool // When true then the next chunk will be written to a new file.
}
// getNextChunkRef takes a chunk and returns the chunk reference which will refer to it once it has been written.
// getNextChunkRef also decides whether a new file should be cut before writing this chunk, and it returns the decision via the second return value.
// The order of calling getNextChunkRef must be the order in which chunks are written to the disk.
func (f *chunkPos) getNextChunkRef(chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, cutFile bool) {
chkLen := uint64(len(chk.Bytes()))
bytesToWrite := f.bytesToWriteForChunk(chkLen)
if f.shouldCutNewFile(bytesToWrite) {
f.toNewFile()
f.cutFile = false
cutFile = true
}
chkOffset := f.offset
f.offset += bytesToWrite
return newChunkDiskMapperRef(f.seq, chkOffset), cutFile
}
// toNewFile updates the seq/offset position to point to the beginning of a new chunk file.
func (f *chunkPos) toNewFile() {
f.seq++
f.offset = SegmentHeaderSize
}
// cutFileOnNextChunk ensures that the next chunk will be written into a new file.
// Not thread safe, a lock must be held when calling this.
func (f *chunkPos) cutFileOnNextChunk() {
f.cutFile = true
}
// setSeq sets the sequence number of the head chunk file.
func (f *chunkPos) setSeq(seq uint64) {
f.seq = seq
}
// shouldCutNewFile returns whether a new file should be cut based on the file size.
// Not thread safe, a lock must be held when calling this.
func (f *chunkPos) shouldCutNewFile(bytesToWrite uint64) bool {
if f.cutFile {
return true
}
return f.offset == 0 || // First head chunk file.
f.offset+bytesToWrite > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
}
// bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size,
// including all meta data before and after the chunk data.
// Head chunk format: https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/head_chunks.md#chunk
func (*chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
// Headers.
bytes := uint64(SeriesRefSize) + 2*MintMaxtSize + ChunkEncodingSize
// Size of chunk length encoded as uvarint.
bytes += uint64(varint.UvarintSize(chkLen))
// Chunk length.
bytes += chkLen
// crc32.
bytes += CRCSize
return bytes
}
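// Editor-added worked example for the formula above: a 100-byte XOR chunk
// takes 8 (series ref) + 16 (mint and maxt) + 1 (encoding) + 1 (uvarint of
// 100, which fits in a single byte) + 100 (chunk data) + 4 (CRC32)
// = 130 bytes on disk.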
// ChunkDiskMapper is for writing the Head block chunks to disk
// and accessing chunks via mmapped files.
type ChunkDiskMapper struct {
// Writer.
dir *os.File
writeBufferSize int
curFile *os.File // File being written to.
curFileSequence int // Index of current open file being appended to. 0 if no file is active.
curFileOffset atomic.Uint64 // Bytes written in current open file.
curFileMaxt int64 // Used for the size retention.
// The values in evtlPos represent the file position which will eventually be
// reached once the content of the write queue has been fully processed.
evtlPosMtx sync.Mutex
evtlPos chunkPos
byteBuf [MaxHeadChunkMetaSize]byte // Buffer used to write the header of the chunk.
chkWriter *bufio.Writer // Writer for the current open file.
crc32 hash.Hash
writePathMtx sync.Mutex
// Reader.
// The int key in the map is the file number on the disk.
mmappedChunkFiles map[int]*mmappedChunkFile // Contains the m-mapped files for each chunk file mapped with its index.
closers map[int]io.Closer // Closers for resources behind the byte slices.
readPathMtx sync.RWMutex // Mutex used to protect the above 2 maps.
pool chunkenc.Pool // This is used when fetching a chunk from the disk to allocate a chunk.
// Writer and Reader.
// We flush chunks to disk in batches. Hence, we store them in this buffer
// from which chunks are served till they are flushed and are ready for m-mapping.
chunkBuffer *chunkBuffer
// Whether the maxt field is set for all mmapped chunk files tracked within the mmappedChunkFiles map.
// This is done after iterating through all the chunks in those files using the IterateAllChunks method.
fileMaxtSet bool
writeQueue *chunkWriteQueue
closed bool
}
// mmappedChunkFile provides mmap access to an entire head chunks file that holds many chunks.
type mmappedChunkFile struct {
byteSlice ByteSlice
maxt int64 // Max timestamp among all of this file's chunks.
}
// NewChunkDiskMapper returns a new ChunkDiskMapper against the given directory
// using the default head chunk file duration.
// NOTE: The 'IterateAllChunks' method needs to be called at least once after creating a ChunkDiskMapper
// to set the maxt of all files.
func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Pool, writeBufferSize, writeQueueSize int) (*ChunkDiskMapper, error) {
// Validate write buffer size.
if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize {
return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize)
}
if writeBufferSize%1024 != 0 {
return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize)
}
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
dirFile, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
m := &ChunkDiskMapper{
dir: dirFile,
pool: pool,
writeBufferSize: writeBufferSize,
crc32: newCRC32(),
chunkBuffer: newChunkBuffer(),
}
if writeQueueSize > 0 {
m.writeQueue = newChunkWriteQueue(reg, writeQueueSize, m.writeChunk)
}
if m.pool == nil {
m.pool = chunkenc.NewPool()
}
return m, m.openMMapFiles()
}
// Chunk encodings for out-of-order chunks.
// These encodings must only be used by the Head block for its internal bookkeeping.
const (
OutOfOrderMask = uint8(0b10000000)
)
func (*ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
enc := uint8(sourceEncoding) | OutOfOrderMask
return chunkenc.Encoding(enc)
}
func (*ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
return (uint8(e) & OutOfOrderMask) != 0
}
func (*ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
restored := uint8(sourceEncoding) & (^OutOfOrderMask)
return chunkenc.Encoding(restored)
}
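// exampleOOOMask is an editor-added sketch (not used elsewhere) of the three
// helpers above: the top bit of the encoding byte marks a chunk as
// out-of-order while the low 7 bits keep the original encoding intact.
func exampleOOOMask(cdm *ChunkDiskMapper) {
	masked := cdm.ApplyOutOfOrderMask(chunkenc.EncXOR)
	fmt.Println(cdm.IsOutOfOrderChunk(masked))              // true
	fmt.Println(cdm.RemoveMasks(masked) == chunkenc.EncXOR) // true
	fmt.Println(cdm.IsOutOfOrderChunk(chunkenc.EncXOR))     // false
}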
// openMMapFiles opens all files within dir for mmapping.
func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{}
cdm.closers = map[int]io.Closer{}
defer func() {
if returnErr != nil {
returnErr = tsdb_errors.NewMulti(returnErr, closeAllFromMap(cdm.closers)).Err()
cdm.mmappedChunkFiles = nil
cdm.closers = nil
}
}()
files, err := listChunkFiles(cdm.dir.Name())
if err != nil {
return err
}
files, err = repairLastChunkFile(files)
if err != nil {
return err
}
chkFileIndices := make([]int, 0, len(files))
for seq, fn := range files {
f, err := fileutil.OpenMmapFile(fn)
if err != nil {
return fmt.Errorf("mmap files, file: %s: %w", fn, err)
}
cdm.closers[seq] = f
cdm.mmappedChunkFiles[seq] = &mmappedChunkFile{byteSlice: realByteSlice(f.Bytes())}
chkFileIndices = append(chkFileIndices, seq)
}
// Check for gaps in the files.
slices.Sort(chkFileIndices)
if len(chkFileIndices) == 0 {
return nil
}
lastSeq := chkFileIndices[0]
for _, seq := range chkFileIndices[1:] {
if seq != lastSeq+1 {
return fmt.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq)
}
lastSeq = seq
}
for i, b := range cdm.mmappedChunkFiles {
if b.byteSlice.Len() < HeadChunkFileHeaderSize {
return fmt.Errorf("%s: invalid head chunk file header: %w", files[i], errInvalidSize)
}
// Verify magic number.
if m := binary.BigEndian.Uint32(b.byteSlice.Range(0, MagicChunksSize)); m != MagicHeadChunks {
return fmt.Errorf("%s: invalid magic number %x", files[i], m)
}
// Verify chunk format version.
if v := int(b.byteSlice.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
return fmt.Errorf("%s: invalid chunk format version %d", files[i], v)
}
}
cdm.evtlPos.setSeq(uint64(lastSeq))
return nil
}
func listChunkFiles(dir string) (map[int]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
res := map[int]string{}
for _, fi := range files {
seq, err := strconv.ParseUint(fi.Name(), 10, 64)
if err != nil {
continue
}
res[int(seq)] = filepath.Join(dir, fi.Name())
}
return res, nil
}
// HardLinkChunkFiles creates hardlinks for chunk files from src to dst.
// It does nothing if src doesn't exist, and it ensures dst is created if it doesn't exist yet.
func HardLinkChunkFiles(src, dst string) error {
_, err := os.Stat(src)
if os.IsNotExist(err) {
return nil
}
if err != nil {
return fmt.Errorf("check source chunks dir: %w", err)
}
if err := os.MkdirAll(dst, 0o777); err != nil {
return fmt.Errorf("set up destination chunks dir: %w", err)
}
files, err := listChunkFiles(src)
if err != nil {
return fmt.Errorf("list chunks: %w", err)
}
for _, filePath := range files {
_, fileName := filepath.Split(filePath)
err := os.Link(filepath.Join(src, fileName), filepath.Join(dst, fileName))
if err != nil {
return fmt.Errorf("hardlink a chunk: %w", err)
}
}
return nil
}
// repairLastChunkFile deletes the last file if it's empty.
// Because we don't fsync when creating these files, we could end
// up with an empty file at the end during an abrupt shutdown.
func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr error) {
lastFile := -1
for seq := range files {
if seq > lastFile {
lastFile = seq
}
}
if lastFile <= 0 {
return files, nil
}
f, err := os.Open(files[lastFile])
if err != nil {
return files, fmt.Errorf("open file during last head chunk file repair: %w", err)
}
buf := make([]byte, MagicChunksSize)
size, err := f.Read(buf)
if err != nil && !errors.Is(err, io.EOF) {
return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err)
}
if err := f.Close(); err != nil {
return files, fmt.Errorf("close file during last head chunk file repair: %w", err)
}
// We either don't have enough bytes for the magic number or the magic number is 0.
// NOTE: we should not check for a wrong magic number here because that error
// needs to be sent up the call chain (already done elsewhere)
// for the proper repair mechanism to happen in the Head.
if size < MagicChunksSize || binary.BigEndian.Uint32(buf) == 0 {
// Corrupt file, hence remove it.
if err := os.RemoveAll(files[lastFile]); err != nil {
return files, fmt.Errorf("delete corrupted, empty head chunk file during last file repair: %w", err)
}
delete(files, lastFile)
}
return files, nil
}
// WriteChunk writes the chunk to disk.
// The returned chunk ref is the reference from where the chunk encoding starts for the chunk.
func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) {
// cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue).
cdm.evtlPosMtx.Lock()
defer cdm.evtlPosMtx.Unlock()
ref, cutFile := cdm.evtlPos.getNextChunkRef(chk)
if cdm.writeQueue != nil {
return cdm.writeChunkViaQueue(ref, isOOO, cutFile, seriesRef, mint, maxt, chk, callback)
}
err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
if callback != nil {
callback(err)
}
return ref
}
func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, isOOO, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
var err error
if callback != nil {
defer func() {
if err != nil {
callback(err)
}
}()
}
err = cdm.writeQueue.addJob(chunkWriteJob{
cutFile: cutFile,
seriesRef: seriesRef,
mint: mint,
maxt: maxt,
chk: chk,
ref: ref,
isOOO: isOOO,
callback: callback,
})
return ref
}
func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) (err error) {
cdm.writePathMtx.Lock()
defer cdm.writePathMtx.Unlock()
if cdm.closed {
return ErrChunkDiskMapperClosed
}
if cutFile {
err := cdm.cutAndExpectRef(ref)
if err != nil {
return err
}
}
// If len(chk.Bytes())+MaxHeadChunkMetaSize >= writeBufferSize, the chunk is at least as big as the buffer itself;
// no need to flush here, as we have to flush at the end anyway (to not keep partial chunks in the buffer).
if len(chk.Bytes())+MaxHeadChunkMetaSize < cdm.writeBufferSize && cdm.chkWriter.Available() < MaxHeadChunkMetaSize+len(chk.Bytes()) {
if err := cdm.flushBuffer(); err != nil {
return err
}
}
cdm.crc32.Reset()
bytesWritten := 0
binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(seriesRef))
bytesWritten += SeriesRefSize
binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(mint))
bytesWritten += MintMaxtSize
binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(maxt))
bytesWritten += MintMaxtSize
enc := chk.Encoding()
if isOOO {
enc = cdm.ApplyOutOfOrderMask(enc)
}
cdm.byteBuf[bytesWritten] = byte(enc)
bytesWritten += ChunkEncodingSize
n := binary.PutUvarint(cdm.byteBuf[bytesWritten:], uint64(len(chk.Bytes())))
bytesWritten += n
if err := cdm.writeAndAppendToCRC32(cdm.byteBuf[:bytesWritten]); err != nil {
return err
}
if err := cdm.writeAndAppendToCRC32(chk.Bytes()); err != nil {
return err
}
if err := cdm.writeCRC32(); err != nil {
return err
}
if maxt > cdm.curFileMaxt {
cdm.curFileMaxt = maxt
}
cdm.chunkBuffer.put(ref, chk)
if len(chk.Bytes())+MaxHeadChunkMetaSize >= cdm.writeBufferSize {
// The chunk was bigger than the buffer itself.
// Flushing to not keep partial chunks in buffer.
if err := cdm.flushBuffer(); err != nil {
return err
}
}
return nil
}
// CutNewFile ensures that a new file will be created the next time a chunk is written.
func (cdm *ChunkDiskMapper) CutNewFile() {
cdm.evtlPosMtx.Lock()
defer cdm.evtlPosMtx.Unlock()
cdm.evtlPos.cutFileOnNextChunk()
}
func (cdm *ChunkDiskMapper) IsQueueEmpty() bool {
if cdm.writeQueue == nil {
return true
}
return cdm.writeQueue.queueIsEmpty()
}
// cutAndExpectRef creates a new m-mapped file.
// The write lock should be held before calling this.
// It ensures that the position in the new file matches the given chunk reference; if not, it returns an error.
func (cdm *ChunkDiskMapper) cutAndExpectRef(chkRef ChunkDiskMapperRef) (err error) {
seq, offset, err := cdm.cut()
if err != nil {
return err
}
if expSeq, expOffset := chkRef.Unpack(); seq != expSeq || offset != expOffset {
return fmt.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset)
}
return nil
}
// cut creates a new m-mapped file. The write lock should be held before calling this.
// It returns the file sequence and the offset in that file to start writing chunks.
func (cdm *ChunkDiskMapper) cut() (seq, offset int, returnErr error) {
// Sync current tail to disk and close.
if err := cdm.finalizeCurFile(); err != nil {
return 0, 0, err
}
offset, newFile, seq, err := cutSegmentFile(cdm.dir, MagicHeadChunks, headChunksFormatV1, HeadChunkFilePreallocationSize)
if err != nil {
return 0, 0, err
}
defer func() {
// The file should not be closed if there is no error;
// it's kept open in the ChunkDiskMapper.
if returnErr != nil {
returnErr = tsdb_errors.NewMulti(returnErr, newFile.Close()).Err()
}
}()
cdm.curFileOffset.Store(uint64(offset))
if cdm.curFile != nil {
cdm.readPathMtx.Lock()
cdm.mmappedChunkFiles[cdm.curFileSequence].maxt = cdm.curFileMaxt
cdm.readPathMtx.Unlock()
}
mmapFile, err := fileutil.OpenMmapFileWithSize(newFile.Name(), MaxHeadChunkFileSize)
if err != nil {
return 0, 0, err
}
cdm.readPathMtx.Lock()
cdm.curFileSequence = seq
cdm.curFile = newFile
if cdm.chkWriter != nil {
cdm.chkWriter.Reset(newFile)
} else {
cdm.chkWriter = bufio.NewWriterSize(newFile, cdm.writeBufferSize)
}
cdm.closers[cdm.curFileSequence] = mmapFile
cdm.mmappedChunkFiles[cdm.curFileSequence] = &mmappedChunkFile{byteSlice: realByteSlice(mmapFile.Bytes())}
cdm.readPathMtx.Unlock()
cdm.curFileMaxt = 0
return seq, offset, nil
}
// finalizeCurFile writes all pending data to the current tail file,
// truncates its size, and closes it.
func (cdm *ChunkDiskMapper) finalizeCurFile() error {
if cdm.curFile == nil {
return nil
}
if err := cdm.flushBuffer(); err != nil {
return err
}
if err := cdm.curFile.Sync(); err != nil {
return err
}
return cdm.curFile.Close()
}
func (cdm *ChunkDiskMapper) write(b []byte) error {
n, err := cdm.chkWriter.Write(b)
cdm.curFileOffset.Add(uint64(n))
return err
}
func (cdm *ChunkDiskMapper) writeAndAppendToCRC32(b []byte) error {
if err := cdm.write(b); err != nil {
return err
}
_, err := cdm.crc32.Write(b)
return err
}
func (cdm *ChunkDiskMapper) writeCRC32() error {
return cdm.write(cdm.crc32.Sum(cdm.byteBuf[:0]))
}
// flushBuffer flushes the current in-memory chunks.
// Assumes that writePathMtx is _write_ locked before calling this method.
func (cdm *ChunkDiskMapper) flushBuffer() error {
if err := cdm.chkWriter.Flush(); err != nil {
return err
}
cdm.chunkBuffer.clear()
return nil
}
// Chunk returns a chunk from a given reference.
func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error) {
cdm.readPathMtx.RLock()
// We hold this read lock for the entire duration because if Close()
// is called, the data in the byte slice will get corrupted as the mmapped
// file will be closed.
defer cdm.readPathMtx.RUnlock()
if cdm.closed {
return nil, ErrChunkDiskMapperClosed
}
if cdm.writeQueue != nil {
chunk := cdm.writeQueue.get(ref)
if chunk != nil {
return chunk, nil
}
}
sgmIndex, chkStart := ref.Unpack()
// We skip the series ref and the mint/maxt beforehand.
chkStart += SeriesRefSize + (2 * MintMaxtSize)
// If it is the current open file, then the chunks can be in the buffer too.
if sgmIndex == cdm.curFileSequence {
chunk := cdm.chunkBuffer.get(ref)
if chunk != nil {
return chunk, nil
}
}
mmapFile, ok := cdm.mmappedChunkFiles[sgmIndex]
if !ok {
if sgmIndex > cdm.curFileSequence {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: -1,
Err: fmt.Errorf("head chunk file index %d more than current open file", sgmIndex),
}
}
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("head chunk file index %d does not exist on disk", sgmIndex),
}
}
if chkStart+MaxChunkLengthFieldSize > mmapFile.byteSlice.Len() {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()),
}
}
// Encoding.
chkEnc := mmapFile.byteSlice.Range(chkStart, chkStart+ChunkEncodingSize)[0]
sourceChkEnc := chunkenc.Encoding(chkEnc)
// Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
chkEnc = byte(cdm.RemoveMasks(sourceChkEnc))
// Data length.
// With the minimum chunk length this should never cause us to read
// over the end of the slice.
chkDataLenStart := chkStart + ChunkEncodingSize
c := mmapFile.byteSlice.Range(chkDataLenStart, chkDataLenStart+MaxChunkLengthFieldSize)
chkDataLen, n := binary.Uvarint(c)
if n <= 0 {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("reading chunk length failed with %d", n),
}
}
if chkDataLen > uint64(math.MaxInt) {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("chunk length %d exceeds supported size", chkDataLen),
}
}
chkDataLenInt := int(chkDataLen)
if chkDataLenStart > math.MaxInt-n-chkDataLenInt {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("chunk data end overflows supported size (start=%d, len=%d, n=%d)", chkDataLenStart, chkDataLenInt, n),
}
}
// Verify the chunk data end.
chkDataEnd := chkDataLenStart + n + chkDataLenInt
if chkDataEnd > mmapFile.byteSlice.Len() {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()),
}
}
// Check the CRC.
sum := mmapFile.byteSlice.Range(chkDataEnd, chkDataEnd+CRCSize)
if err := checkCRC32(mmapFile.byteSlice.Range(chkStart-(SeriesRefSize+2*MintMaxtSize), chkDataEnd), sum); err != nil {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: err,
}
}
// The chunk data itself.
chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd)
// Make a copy of the chunk data to prevent a panic occurring because the returned
// chunk data slice references an mmap-ed file which could be closed after the
// function returns but while the chunk is still in use.
chkDataCopy := make([]byte, len(chkData))
copy(chkDataCopy, chkData)
chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkDataCopy)
if err != nil {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: err,
}
}
return chk, nil
}
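// exampleWriteAndReadBack is an editor-added sketch (not used elsewhere) of
// the write/read round trip without a write queue: WriteChunk returns the
// reference under which the chunk can later be retrieved via Chunk(). Error
// handling is elided for brevity.
func exampleWriteAndReadBack(dir string) {
	cdm, _ := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, DefaultWriteQueueSize)
	chk := chunkenc.NewXORChunk()
	app, _ := chk.Appender()
	app.Append(1000, 42.0) // Timestamp and value of a single sample.
	ref := cdm.WriteChunk(1, 1000, 1000, chk, false, nil)
	got, _ := cdm.Chunk(ref)
	fmt.Println(got.NumSamples()) // 1
	_ = cdm.Close()
}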
// IterateAllChunks iterates over all mmappedChunkFiles (in order of head chunk file name/number) and all the chunks within them,
// and runs the provided function with information about each chunk. It returns on the first error encountered.
// NOTE: This method needs to be called at least once after creating a ChunkDiskMapper
// to set the maxt of all files.
func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) {
cdm.writePathMtx.Lock()
defer cdm.writePathMtx.Unlock()
defer func() {
cdm.fileMaxtSet = true
}()
// Iterate files in ascending order.
segIDs := make([]int, 0, len(cdm.mmappedChunkFiles))
for seg := range cdm.mmappedChunkFiles {
segIDs = append(segIDs, seg)
}
slices.Sort(segIDs)
for _, segID := range segIDs {
mmapFile := cdm.mmappedChunkFiles[segID]
fileEnd := mmapFile.byteSlice.Len()
if segID == cdm.curFileSequence {
fileEnd = int(cdm.curFileSize())
}
idx := HeadChunkFileHeaderSize
for idx < fileEnd {
if fileEnd-idx < MaxHeadChunkMetaSize {
// Check for all 0s which marks the end of the file.
allZeros := true
for _, b := range mmapFile.byteSlice.Range(idx, fileEnd) {
if b != byte(0) {
allZeros = false
break
}
}
if allZeros {
// End of segment chunk file content.
break
}
return &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: segID,
Err: fmt.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+
" - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID),
}
}
chunkRef := newChunkDiskMapperRef(uint64(segID), uint64(idx))
startIdx := idx
seriesRef := HeadSeriesRef(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+SeriesRefSize)))
idx += SeriesRefSize
mint := int64(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+MintMaxtSize)))
idx += MintMaxtSize
maxt := int64(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+MintMaxtSize)))
idx += MintMaxtSize
// We preallocate the file to help with m-mapping (especially on Windows systems).
// As the series ref always starts from 1, we assume a value of 0 to mean the end of the actual file data.
// We are not considering possible file corruption that can cause it to be 0.
// Additionally, we check mint and maxt just to be sure.
if seriesRef == 0 && mint == 0 && maxt == 0 {
break
}
chkEnc := chunkenc.Encoding(mmapFile.byteSlice.Range(idx, idx+ChunkEncodingSize)[0])
idx += ChunkEncodingSize
dataLen, n := binary.Uvarint(mmapFile.byteSlice.Range(idx, idx+MaxChunkLengthFieldSize))
idx += n
numSamples := binary.BigEndian.Uint16(mmapFile.byteSlice.Range(idx, idx+2))
idx += int(dataLen) // Skip the data.
// In the beginning we only checked for the chunk meta size.
// Now that we have added the chunk data length, we check for sufficient bytes again.
if idx+CRCSize > fileEnd {
return &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: segID,
Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID),
}
}
// Check CRC.
sum := mmapFile.byteSlice.Range(idx, idx+CRCSize)
if err := checkCRC32(mmapFile.byteSlice.Range(startIdx, idx), sum); err != nil {
return &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: segID,
Err: err,
}
}
idx += CRCSize
if maxt > mmapFile.maxt {
mmapFile.maxt = maxt
}
isOOO := cdm.IsOutOfOrderChunk(chkEnc)
// Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
chkEnc = cdm.RemoveMasks(chkEnc)
if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil {
var cerr *CorruptionErr
if errors.As(err, &cerr) {
cerr.Dir = cdm.dir.Name()
cerr.FileIndex = segID
return cerr
}
return err
}
}
if idx > fileEnd {
// It should be equal to the slice length.
return &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: segID,
Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID),
}
}
}
return nil
}
// Truncate deletes the head chunk files with numbers less than the given fileNo.
func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error {
cdm.readPathMtx.RLock()
// Sort the file indices; otherwise, if file deletion fails partway through,
// it can lead to unsequential files as the map is not sorted.
chkFileIndices := make([]int, 0, len(cdm.mmappedChunkFiles))
for seq := range cdm.mmappedChunkFiles {
chkFileIndices = append(chkFileIndices, seq)
}
slices.Sort(chkFileIndices)
var removedFiles []int
for _, seq := range chkFileIndices {
if seq == cdm.curFileSequence || uint32(seq) >= fileNo {
break
}
removedFiles = append(removedFiles, seq)
}
cdm.readPathMtx.RUnlock()
errs := tsdb_errors.NewMulti()
// Cut a new file only if the current file has some chunks.
if cdm.curFileSize() > HeadChunkFileHeaderSize {
// There is a known race condition here: between the check of curFileSize() and the call to CutNewFile(),
// a new file could already be cut. This is acceptable because it will simply result in an empty file which
// won't do any harm.
cdm.CutNewFile()
}
pendingDeletes, err := cdm.deleteFiles(removedFiles)
errs.Add(err)
if len(chkFileIndices) == len(removedFiles) {
// All files were deleted. Reset the current sequence.
cdm.evtlPosMtx.Lock()
// We can safely reset the sequence only if the write queue is empty. If it's not empty,
// then there may be a job in the queue that will create a new segment file with an ID
// generated before the sequence reset.
//
// The queueIsEmpty() function must be called while holding the cdm.evtlPosMtx to avoid
// a race condition with WriteChunk().
if cdm.writeQueue == nil || cdm.writeQueue.queueIsEmpty() {
if err == nil {
cdm.evtlPos.setSeq(0)
} else {
// In case of error, set it to the last file number on the disk that was not deleted.
cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
}
}
cdm.evtlPosMtx.Unlock()
}
return errs.Err()
}
// deleteFiles deletes the given file sequences in order of the sequence.
// In case of an error, it returns the sorted file sequences that were not deleted from the _disk_.
func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) ([]int, error) {
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/head_chunks_other.go | tsdb/chunks/head_chunks_other.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
package chunks
// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
// Windows needs preallocation while other OSes do not, but we observed that a preallocation of 0 causes unit tests to flake.
// This small allocation for non-Windows OSes removes the flake.
var HeadChunkFilePreallocationSize int64 = MinWriteBufferSize * 2
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/chunk_write_queue.go | tsdb/chunks/chunk_write_queue.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"errors"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
const (
// Minimum peak size that chunkWriteQueue.chunkRefMap must have reached since its last shrinking to be shrunk again.
chunkRefMapShrinkThreshold = 1000
// Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
chunkRefMapMinShrinkInterval = 10 * time.Minute
// Maximum size of a segment used by the job queue (number of elements). With chunkWriteJob being 64 bytes,
// this will use ~512 KiB for an empty queue.
maxChunkQueueSegmentSize = 8192
)
type chunkWriteJob struct {
cutFile bool
seriesRef HeadSeriesRef
mint int64
maxt int64
chk chunkenc.Chunk
ref ChunkDiskMapperRef
isOOO bool
callback func(error)
}
// chunkWriteQueue is a queue for writing chunks to disk in a non-blocking fashion.
// Chunks that shall be written get added to the queue, which is consumed asynchronously.
// Adding jobs to the queue is non-blocking as long as the queue isn't full.
type chunkWriteQueue struct {
jobs *writeJobQueue
chunkRefMapMtx sync.RWMutex
chunkRefMap map[ChunkDiskMapperRef]chunkenc.Chunk
chunkRefMapPeakSize int // Largest size that chunkRefMap has grown to since the last time we shrank it.
chunkRefMapLastShrink time.Time // When the chunkRefMap has been shrunk the last time.
// isRunningMtx serves two purposes:
// 1. It protects the isRunning field.
// 2. It serializes the adding of jobs to the chunkRefMap in the addJob() method. If the jobs queue is full, addJob() will block
// while holding this mutex, which guarantees that chunkRefMap won't ever grow beyond the queue size + 1.
isRunningMtx sync.Mutex
isRunning bool // Used to prevent new jobs from being added to the queue once the queue is closed.
workerWg sync.WaitGroup
writeChunk writeChunkF
// Keeping separate counters instead of only a single CounterVec to improve the performance of the critical
// addJob() method which otherwise would need to perform a WithLabelValues call on the CounterVec.
adds prometheus.Counter
gets prometheus.Counter
completed prometheus.Counter
shrink prometheus.Counter
}
// writeChunkF is a function which writes chunks; it is dynamic to allow mocking in tests.
type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error
func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue {
counters := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_tsdb_chunk_write_queue_operations_total",
Help: "Number of operations on the chunk_write_queue.",
},
[]string{"operation"},
)
segmentSize := min(size, maxChunkQueueSegmentSize)
q := &chunkWriteQueue{
jobs: newWriteJobQueue(size, segmentSize),
chunkRefMap: make(map[ChunkDiskMapperRef]chunkenc.Chunk),
chunkRefMapLastShrink: time.Now(),
writeChunk: writeChunk,
adds: counters.WithLabelValues("add"),
gets: counters.WithLabelValues("get"),
completed: counters.WithLabelValues("complete"),
shrink: counters.WithLabelValues("shrink"),
}
if reg != nil {
reg.MustRegister(counters)
}
q.start()
return q
}
func (c *chunkWriteQueue) start() {
c.workerWg.Add(1)
go func() {
defer c.workerWg.Done()
for {
job, ok := c.jobs.pop()
if !ok {
return
}
c.processJob(job)
}
}()
c.isRunningMtx.Lock()
c.isRunning = true
c.isRunningMtx.Unlock()
}
func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.isOOO, job.cutFile)
if job.callback != nil {
job.callback(err)
}
c.chunkRefMapMtx.Lock()
defer c.chunkRefMapMtx.Unlock()
delete(c.chunkRefMap, job.ref)
c.completed.Inc()
c.shrinkChunkRefMap()
}
// shrinkChunkRefMap checks whether the conditions to shrink the chunkRefMap are met;
// if so, chunkRefMap is reinitialized. The chunkRefMapMtx must be held when calling this method.
//
// We do this because the Go runtime doesn't release the internal memory used by a map after the map has been emptied.
// To achieve that, we create a new map and throw the old one away.
func (c *chunkWriteQueue) shrinkChunkRefMap() {
if len(c.chunkRefMap) > 0 {
// Can't shrink it while there is data in it.
return
}
if c.chunkRefMapPeakSize < chunkRefMapShrinkThreshold {
// Not shrinking it because it has not grown to the minimum threshold yet.
return
}
now := time.Now()
if now.Sub(c.chunkRefMapLastShrink) < chunkRefMapMinShrinkInterval {
// Not shrinking it because the minimum duration between shrink-events has not passed yet.
return
}
// Re-initialize the chunk ref map to half of the peak size that it has grown to since the last re-init event.
// We are trying to hit the sweet spot in the trade-off between initializing it to a very small size
// potentially resulting in many allocations to re-grow it, and initializing it to a large size potentially
// resulting in unused allocated memory.
c.chunkRefMap = make(map[ChunkDiskMapperRef]chunkenc.Chunk, c.chunkRefMapPeakSize/2)
c.chunkRefMapPeakSize = 0
c.chunkRefMapLastShrink = now
c.shrink.Inc()
}
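// shrinkMapSketch is an editor-added generic sketch (not used elsewhere) of
// the pattern applied above, in isolation: delete() never returns a map's
// bucket memory to the runtime, so an emptied map that once grew large is
// replaced with a fresh, smaller allocation.
func shrinkMapSketch[K comparable, V any](m map[K]V, peakSize int) map[K]V {
	if len(m) > 0 || peakSize < chunkRefMapShrinkThreshold {
		return m // Still in use, or never grew enough to be worth shrinking.
	}
	// Half the peak: the same re-grow vs. wasted-memory trade-off as above.
	return make(map[K]V, peakSize/2)
}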
func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) {
defer func() {
if err == nil {
c.adds.Inc()
}
}()
c.isRunningMtx.Lock()
defer c.isRunningMtx.Unlock()
if !c.isRunning {
return errors.New("queue is not running")
}
c.chunkRefMapMtx.Lock()
c.chunkRefMap[job.ref] = job.chk
// Keep track of the peak usage of c.chunkRefMap.
if len(c.chunkRefMap) > c.chunkRefMapPeakSize {
c.chunkRefMapPeakSize = len(c.chunkRefMap)
}
c.chunkRefMapMtx.Unlock()
if ok := c.jobs.push(job); !ok {
c.chunkRefMapMtx.Lock()
delete(c.chunkRefMap, job.ref)
c.chunkRefMapMtx.Unlock()
return errors.New("queue is closed")
}
return nil
}
func (c *chunkWriteQueue) get(ref ChunkDiskMapperRef) chunkenc.Chunk {
c.chunkRefMapMtx.RLock()
defer c.chunkRefMapMtx.RUnlock()
chk, ok := c.chunkRefMap[ref]
if ok {
c.gets.Inc()
}
return chk
}
func (c *chunkWriteQueue) stop() {
c.isRunningMtx.Lock()
defer c.isRunningMtx.Unlock()
if !c.isRunning {
return
}
c.isRunning = false
c.jobs.close()
c.workerWg.Wait()
}
func (c *chunkWriteQueue) queueIsEmpty() bool {
return c.queueSize() == 0
}
func (c *chunkWriteQueue) queueIsFull() bool {
// When the queue is full and blocked on the writer, the chunkRefMap has one more job than
// the capacity of the job queue, because one job is currently being processed and blocked in the writer.
return c.queueSize() == c.jobs.maxSize+1
}
func (c *chunkWriteQueue) queueSize() int {
c.chunkRefMapMtx.Lock()
defer c.chunkRefMapMtx.Unlock()
// Looking at chunkRefMap instead of the job queue because a job is popped from the queue
// before it has been fully processed; it remains in the chunkRefMap until the processing is complete.
return len(c.chunkRefMap)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/head_chunks_test.go | tsdb/chunks/head_chunks_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"encoding/binary"
"errors"
"math/rand"
"os"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
var writeQueueSize int
func TestMain(m *testing.M) {
// Run all tests with the chunk write queue disabled.
writeQueueSize = 0
exitVal := m.Run()
if exitVal != 0 {
os.Exit(exitVal)
}
// Re-run all tests with a chunk write queue size of 1e6.
writeQueueSize = 1000000
exitVal = m.Run()
os.Exit(exitVal)
}
func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
}()
expectedBytes := []byte{}
nextChunkOffset := uint64(HeadChunkFileHeaderSize)
chkCRC32 := newCRC32()
type expectedDataType struct {
seriesRef HeadSeriesRef
chunkRef ChunkDiskMapperRef
mint, maxt int64
numSamples uint16
chunk chunkenc.Chunk
isOOO bool
}
expectedData := []expectedDataType{}
var buf [MaxHeadChunkMetaSize]byte
totalChunks := 0
var firstFileName string
for hrw.curFileSequence < 3 || hrw.chkWriter.Buffered() == 0 {
addChunks := func(numChunks int) {
for range numChunks {
seriesRef, chkRef, mint, maxt, chunk, isOOO := createChunk(t, totalChunks, hrw)
totalChunks++
expectedData = append(expectedData, expectedDataType{
seriesRef: seriesRef,
mint: mint,
maxt: maxt,
chunkRef: chkRef,
chunk: chunk,
numSamples: uint16(chunk.NumSamples()),
isOOO: isOOO,
})
if hrw.curFileSequence != 1 {
// We are checking for bytes written only for the first file.
continue
}
// Calculating expected bytes written on disk for first file.
firstFileName = hrw.curFile.Name()
require.Equal(t, newChunkDiskMapperRef(1, nextChunkOffset), chkRef)
bytesWritten := 0
chkCRC32.Reset()
binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(seriesRef))
bytesWritten += SeriesRefSize
binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(mint))
bytesWritten += MintMaxtSize
binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(maxt))
bytesWritten += MintMaxtSize
enc := chunk.Encoding()
if isOOO {
enc = hrw.ApplyOutOfOrderMask(enc)
}
buf[bytesWritten] = byte(enc)
bytesWritten += ChunkEncodingSize
n := binary.PutUvarint(buf[bytesWritten:], uint64(len(chunk.Bytes())))
bytesWritten += n
expectedBytes = append(expectedBytes, buf[:bytesWritten]...)
_, err := chkCRC32.Write(buf[:bytesWritten])
require.NoError(t, err)
expectedBytes = append(expectedBytes, chunk.Bytes()...)
_, err = chkCRC32.Write(chunk.Bytes())
require.NoError(t, err)
expectedBytes = append(expectedBytes, chkCRC32.Sum(nil)...)
// += seriesRef, mint, maxt, encoding, chunk data len, chunk data, CRC.
nextChunkOffset += SeriesRefSize + 2*MintMaxtSize + ChunkEncodingSize + uint64(n) + uint64(len(chunk.Bytes())) + CRCSize
}
}
addChunks(100)
hrw.CutNewFile()
addChunks(10) // For chunks in in-memory buffer.
}
// Checking on-disk bytes for the first file.
require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Len(t, hrw.closers, len(hrw.mmappedChunkFiles))
actualBytes, err := os.ReadFile(firstFileName)
require.NoError(t, err)
// Check header of the segment file.
require.Equal(t, MagicHeadChunks, int(binary.BigEndian.Uint32(actualBytes[0:MagicChunksSize])))
require.Equal(t, chunksFormatV1, int(actualBytes[MagicChunksSize]))
// Remaining chunk data.
fileEnd := HeadChunkFileHeaderSize + len(expectedBytes)
require.Equal(t, expectedBytes, actualBytes[HeadChunkFileHeaderSize:fileEnd])
// Testing reading of chunks.
for _, exp := range expectedData {
actChunk, err := hrw.Chunk(exp.chunkRef)
require.NoError(t, err)
require.Equal(t, exp.chunk.Bytes(), actChunk.Bytes())
}
// Testing IterateAllChunks method.
dir := hrw.dir.Name()
require.NoError(t, hrw.Close())
hrw = createChunkDiskMapper(t, dir)
idx := 0
require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, _ chunkenc.Encoding, isOOO bool) error {
t.Helper()
expData := expectedData[idx]
require.Equal(t, expData.seriesRef, seriesRef)
require.Equal(t, expData.chunkRef, chunkRef)
require.Equal(t, expData.mint, mint)
require.Equal(t, expData.maxt, maxt)
require.Equal(t, expData.numSamples, numSamples)
require.Equal(t, expData.isOOO, isOOO)
actChunk, err := hrw.Chunk(expData.chunkRef)
require.NoError(t, err)
require.Equal(t, expData.chunk.Bytes(), actChunk.Bytes())
idx++
return nil
}))
require.Len(t, expectedData, idx)
}
// TestChunkDiskMapper_Truncate tests
// * If truncation is happening properly based on the file sequence number passed.
// * The active file is not deleted even if the passed file number makes it eligible to be deleted.
// * Non-empty current file leads to creation of another file after truncation.
func TestChunkDiskMapper_Truncate(t *testing.T) {
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
}()
timeRange := 0
addChunk := func() {
t.Helper()
step := 100
mint, maxt := timeRange+1, timeRange+step-1
var err error
awaitCb := make(chan struct{})
hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(cbErr error) {
err = cbErr
close(awaitCb)
})
<-awaitCb
require.NoError(t, err)
timeRange += step
}
verifyFiles := func(remainingFiles []int) {
t.Helper()
files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err)
require.Len(t, files, len(remainingFiles), "files on disk")
require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
require.Len(t, hrw.closers, len(remainingFiles), "closers")
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
require.True(t, ok)
}
}
// Create segments 1 to 7.
for i := 1; i <= 7; i++ {
hrw.CutNewFile()
addChunk()
}
verifyFiles([]int{1, 2, 3, 4, 5, 6, 7})
// Truncating files.
require.NoError(t, hrw.Truncate(3))
// Add a chunk to trigger cutting of new file.
addChunk()
verifyFiles([]int{3, 4, 5, 6, 7, 8})
dir := hrw.dir.Name()
require.NoError(t, hrw.Close())
// Restarted.
hrw = createChunkDiskMapper(t, dir)
verifyFiles([]int{3, 4, 5, 6, 7, 8})
// New file is created after restart even if last file was empty.
addChunk()
verifyFiles([]int{3, 4, 5, 6, 7, 8, 9})
// Truncating files after restart.
require.NoError(t, hrw.Truncate(6))
verifyFiles([]int{6, 7, 8, 9})
// Truncating a second time without adding a chunk shouldn't create a new file.
require.NoError(t, hrw.Truncate(6))
verifyFiles([]int{6, 7, 8, 9})
// Add a chunk to trigger cutting of new file.
addChunk()
verifyFiles([]int{6, 7, 8, 9, 10})
// Truncation by file number.
require.NoError(t, hrw.Truncate(8))
verifyFiles([]int{8, 9, 10})
// Truncating up to the currently active file should not delete it.
require.NoError(t, hrw.Truncate(10))
// Add a chunk to trigger cutting of new file.
addChunk()
verifyFiles([]int{10, 11}) // One file is the previously active file and one is newly created.
}
// TestChunkDiskMapper_Truncate_PreservesFileSequence tests that truncation doesn't poke
// holes into the file sequence, even if there are empty files in between non-empty files.
// This test exposes https://github.com/prometheus/prometheus/issues/7412 where the truncation
// simply deleted all empty files instead of stopping once it encountered a non-empty file.
func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
}()
timeRange := 0
addChunk := func() {
t.Helper()
awaitCb := make(chan struct{})
step := 100
mint, maxt := timeRange+1, timeRange+step-1
hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(err error) {
close(awaitCb)
require.NoError(t, err)
})
<-awaitCb
timeRange += step
}
emptyFile := func() {
t.Helper()
_, _, err := hrw.cut()
require.NoError(t, err)
hrw.evtlPosMtx.Lock()
hrw.evtlPos.toNewFile()
hrw.evtlPosMtx.Unlock()
}
nonEmptyFile := func() {
t.Helper()
emptyFile()
addChunk()
}
addChunk() // 1. Created with the first chunk.
nonEmptyFile() // 2.
nonEmptyFile() // 3.
emptyFile() // 4.
nonEmptyFile() // 5.
emptyFile() // 6.
verifyFiles := func(remainingFiles []int) {
t.Helper()
files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err)
require.Len(t, files, len(remainingFiles), "files on disk")
require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
require.Len(t, hrw.closers, len(remainingFiles), "closers")
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
require.True(t, ok, "remaining file %d not in hrw.mmappedChunkFiles", i)
}
}
verifyFiles([]int{1, 2, 3, 4, 5, 6})
// Truncating files till 2. It should not delete anything from file 3 (inclusive) onwards,
// even though files 4 and 6 are empty.
require.NoError(t, hrw.Truncate(3))
verifyFiles([]int{3, 4, 5, 6})
// Add chunk, so file 6 is not empty anymore.
addChunk()
verifyFiles([]int{3, 4, 5, 6})
// Truncating till file 3 should also delete file 4, because it is empty.
require.NoError(t, hrw.Truncate(5))
addChunk()
verifyFiles([]int{5, 6, 7})
dir := hrw.dir.Name()
require.NoError(t, hrw.Close())
// Restarting checks for non-sequential files.
hrw = createChunkDiskMapper(t, dir)
verifyFiles([]int{5, 6, 7})
}
func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) {
hrw := createChunkDiskMapper(t, "")
t.Cleanup(func() {
require.NoError(t, hrw.Close())
})
// This test should only run when the queue is enabled.
if hrw.writeQueue == nil {
t.Skip("This test should only run when the queue is enabled")
}
// Add an artificial delay in the writeChunk function to easily trigger the race condition.
origWriteChunk := hrw.writeQueue.writeChunk
hrw.writeQueue.writeChunk = func(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
time.Sleep(100 * time.Millisecond)
return origWriteChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
}
wg := sync.WaitGroup{}
wg.Add(2)
// Write a chunk. Since the queue is enabled, the chunk will be written asynchronously (with the artificial delay).
ref := hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) {
defer wg.Done()
require.NoError(t, err)
})
seq, _ := ref.Unpack()
require.Equal(t, 1, seq)
// Truncate, simulating that all chunks from segment files before 1 can be dropped.
require.NoError(t, hrw.Truncate(1))
// Request to cut a new file when writing the next chunk. If there's a race condition, cutting a new file will
// allow us to detect there's actually an issue with the sequence number (because it's checked when a new segment
// file is created).
hrw.CutNewFile()
// Write another chunk. This will cut a new file.
ref = hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) {
defer wg.Done()
require.NoError(t, err)
})
seq, _ = ref.Unpack()
require.Equal(t, 2, seq)
wg.Wait()
}
// TestHeadReadWriter_TruncateAfterFailedIterateChunks tests for
// https://github.com/prometheus/prometheus/issues/7753
func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
}()
// Write a chunk to iterate over it later.
var err error
awaitCb := make(chan struct{})
hrw.WriteChunk(1, 0, 1000, randomChunk(t), false, func(cbErr error) {
err = cbErr
close(awaitCb)
})
<-awaitCb
require.NoError(t, err)
dir := hrw.dir.Name()
require.NoError(t, hrw.Close())
// Restarting to recreate https://github.com/prometheus/prometheus/issues/7753.
hrw = createChunkDiskMapper(t, dir)
// Forcefully failing IterateAllChunks.
require.Error(t, hrw.IterateAllChunks(func(HeadSeriesRef, ChunkDiskMapperRef, int64, int64, uint16, chunkenc.Encoding, bool) error {
return errors.New("random error")
}))
// Truncation call should not return error after IterateAllChunks fails.
require.NoError(t, hrw.Truncate(2000))
}
func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
hrw := createChunkDiskMapper(t, "")
timeRange := 0
addChunk := func() {
t.Helper()
step := 100
mint, maxt := timeRange+1, timeRange+step-1
var err error
awaitCb := make(chan struct{})
hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(cbErr error) {
err = cbErr
close(awaitCb)
})
<-awaitCb
require.NoError(t, err)
timeRange += step
}
nonEmptyFile := func() {
t.Helper()
hrw.CutNewFile()
addChunk()
}
addChunk() // 1. Created with the first chunk.
nonEmptyFile() // 2.
nonEmptyFile() // 3.
require.Len(t, hrw.mmappedChunkFiles, 3)
lastFile := 0
for idx := range hrw.mmappedChunkFiles {
if idx > lastFile {
lastFile = idx
}
}
require.Equal(t, 3, lastFile)
dir := hrw.dir.Name()
require.NoError(t, hrw.Close())
writeCorruptLastFile := func(b []byte) {
fname := segmentFile(dir, lastFile+1)
f, err := os.OpenFile(fname, os.O_WRONLY|os.O_CREATE, 0o666)
require.NoError(t, err)
_, err = f.Write(b)
require.NoError(t, err)
require.NoError(t, f.Sync())
stat, err := f.Stat()
require.NoError(t, err)
require.Equal(t, int64(len(b)), stat.Size())
require.NoError(t, f.Close())
}
checkRepair := func() {
// Open chunk disk mapper again, corrupt file should be removed.
hrw = createChunkDiskMapper(t, dir)
// Removed from memory.
require.Len(t, hrw.mmappedChunkFiles, 3)
for idx := range hrw.mmappedChunkFiles {
require.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
}
// Removed even from disk.
files, err := os.ReadDir(dir)
require.NoError(t, err)
require.Len(t, files, 3)
for _, fi := range files {
seq, err := strconv.ParseUint(fi.Name(), 10, 64)
require.NoError(t, err)
require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
}
require.NoError(t, hrw.Close())
}
// Write an empty last file mimicking an abrupt shutdown on file creation.
writeCorruptLastFile(nil)
// Removes empty last file.
checkRepair()
// Write another last file that contains only zero bytes.
writeCorruptLastFile([]byte{0, 0, 0, 0, 0, 0, 0, 0})
// Removes the zero-filled last file.
checkRepair()
// Write another corrupt file with less than 4 bytes (size of magic number).
writeCorruptLastFile([]byte{1, 2})
// Removes the partial file.
checkRepair()
// Check that it does not delete a valid last file.
checkRepair()
}
func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {
if dir == "" {
dir = t.TempDir()
}
hrw, err := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, writeQueueSize)
require.NoError(t, err)
require.False(t, hrw.fileMaxtSet)
require.NoError(t, hrw.IterateAllChunks(func(HeadSeriesRef, ChunkDiskMapperRef, int64, int64, uint16, chunkenc.Encoding, bool) error {
return nil
}))
require.True(t, hrw.fileMaxtSet)
return hrw
}
func randomChunk(t *testing.T) chunkenc.Chunk {
chunk := chunkenc.NewXORChunk()
length := rand.Int() % 120
app, err := chunk.Appender()
require.NoError(t, err)
for range length {
app.Append(rand.Int63(), rand.Float64())
}
return chunk
}
func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, chunk chunkenc.Chunk, isOOO bool) {
var err error
seriesRef = HeadSeriesRef(rand.Int63())
mint = int64(idx*1000 + 1)
maxt = int64((idx + 1) * 1000)
chunk = randomChunk(t)
awaitCb := make(chan struct{})
if rand.Intn(2) == 0 {
isOOO = true
}
chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(cbErr error) {
// Capture the callback error; checking the outer (always nil) err here would always pass.
err = cbErr
close(awaitCb)
})
<-awaitCb
require.NoError(t, err)
return seriesRef, chunkRef, mint, maxt, chunk, isOOO
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/samples.go | tsdb/chunks/samples.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
type Samples interface {
Get(i int) Sample
Len() int
}
type Sample interface {
T() int64
F() float64
H() *histogram.Histogram
FH() *histogram.FloatHistogram
Type() chunkenc.ValueType
Copy() Sample // Returns a deep copy.
}
type SampleSlice []Sample
func (s SampleSlice) Get(i int) Sample { return s[i] }
func (s SampleSlice) Len() int { return len(s) }
type sample struct {
t int64
f float64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
func (s sample) T() int64 {
return s.t
}
func (s sample) F() float64 {
return s.f
}
func (s sample) H() *histogram.Histogram {
return s.h
}
func (s sample) FH() *histogram.FloatHistogram {
return s.fh
}
func (s sample) Type() chunkenc.ValueType {
switch {
case s.h != nil:
return chunkenc.ValHistogram
case s.fh != nil:
return chunkenc.ValFloatHistogram
default:
return chunkenc.ValFloat
}
}
func (s sample) Copy() Sample {
c := sample{t: s.t, f: s.f}
if s.h != nil {
c.h = s.h.Copy()
}
if s.fh != nil {
c.fh = s.fh.Copy()
}
return c
}
// GenerateSamples generates numSamples float samples, with both timestamp and value counting up from start.
func GenerateSamples(start, numSamples int) []Sample {
return generateSamples(start, numSamples, func(i int) Sample {
return sample{
t: int64(i),
f: float64(i),
}
})
}
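// For instance, GenerateSamples(10, 3) yields samples with (t, f) pairs
// (10, 10), (11, 11) and (12, 12).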
func generateSamples(start, numSamples int, gen func(int) Sample) []Sample {
samples := make([]Sample, 0, numSamples)
for i := start; i < start+numSamples; i++ {
samples = append(samples, gen(i))
}
return samples
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/head_chunks_windows.go | tsdb/chunks/head_chunks_windows.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
// Windows needs pre-allocation to m-map the file.
var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/chunks.go | tsdb/chunks/chunks.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"io"
"os"
"path/filepath"
"strconv"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
// Segment header fields constants.
const (
// MagicChunks is 4 bytes at the head of a series file.
MagicChunks = 0x85BD40DD
// MagicChunksSize is the size in bytes of MagicChunks.
MagicChunksSize = 4
chunksFormatV1 = 1
ChunksFormatVersionSize = 1
segmentHeaderPaddingSize = 3
// SegmentHeaderSize defines the total size of the header part.
SegmentHeaderSize = MagicChunksSize + ChunksFormatVersionSize + segmentHeaderPaddingSize
)
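// Sanity check on the arithmetic: the header is the 4-byte magic number,
// the 1-byte format version, and 3 bytes of padding, so
// SegmentHeaderSize = 4 + 1 + 3 = 8 bytes.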
// Chunk fields constants.
const (
// MaxChunkLengthFieldSize defines the maximum size of the data length part.
MaxChunkLengthFieldSize = binary.MaxVarintLen32
// ChunkEncodingSize defines the size of the chunk encoding part.
ChunkEncodingSize = 1
)
// ChunkRef is a generic reference for reading chunk data. In prometheus it
// is either a HeadChunkRef or BlockChunkRef, though other implementations
// may have their own reference types.
type ChunkRef uint64
// HeadSeriesRef refers to in-memory series.
type HeadSeriesRef uint64
// HeadChunkRef packs a HeadSeriesRef and a ChunkID into a global 8 Byte ID.
// The HeadSeriesRef and ChunkID may not exceed 5 and 3 bytes respectively.
type HeadChunkRef uint64
func NewHeadChunkRef(hsr HeadSeriesRef, chunkID HeadChunkID) HeadChunkRef {
if hsr > (1<<40)-1 {
panic("series ID exceeds 5 bytes")
}
if chunkID > (1<<24)-1 {
panic("chunk ID exceeds 3 bytes")
}
return HeadChunkRef(uint64(hsr<<24) | uint64(chunkID))
}
func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
return HeadSeriesRef(p >> 24), HeadChunkID(p<<40) >> 40
}
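// headChunkRefRoundTrip is an illustrative sketch (hypothetical values, not
// used elsewhere) of the 5+3 byte packing above: the series ref occupies the
// upper 40 bits and the chunk ID the lower 24 bits.
func headChunkRefRoundTrip() (HeadSeriesRef, HeadChunkID) {
	ref := NewHeadChunkRef(HeadSeriesRef(0x0102030405), HeadChunkID(0x060708))
	// ref is 0x0102030405060708; Unpack reverses the packing.
	return ref.Unpack() // 0x0102030405, 0x060708
}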
// HeadChunkID refers to a specific chunk in a series (memSeries) in the Head.
// Each memSeries has its own monotonically increasing number to refer to its chunks.
// If the HeadChunkID value is...
// - memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk.
// - less than the above, but >= memSeries.firstChunkID, then it's
// memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstChunkID.
//
// If memSeries.headChunks is non-nil it points to a *memChunk that holds the current
// "open" (accepting appends) instance. *memChunk is a linked list and memChunk.next pointer
// might link to the older *memChunk instance.
// If there are multiple *memChunk instances linked to each other from memSeries.headChunks
// they will be m-mapped as soon as possible leaving only "open" *memChunk instance.
//
// Example:
// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
//
// | HeadChunkID value | refers to ... |
// |-------------------|----------------------------------------------------------------------------------------|
// | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head |
// | 7-11 | memSeries.mmappedChunks[i] where i is 0 to 4. |
// | 12 | *memChunk{next: nil} |
// | 13 | *memChunk{next: ^} |
// | 14 | memSeries.headChunks -> *memChunk{next: ^} |
type HeadChunkID uint64
// BlockChunkRef refers to a chunk within a persisted block.
// The upper 4 bytes are for the segment index and
// the lower 4 bytes are for the segment offset where the data starts for this chunk.
type BlockChunkRef uint64
// NewBlockChunkRef packs the file index and byte offset into a BlockChunkRef.
func NewBlockChunkRef(fileIndex, fileOffset uint64) BlockChunkRef {
return BlockChunkRef(fileIndex<<32 | fileOffset)
}
func (b BlockChunkRef) Unpack() (int, int) {
sgmIndex := int(b >> 32)
chkStart := int((b << 32) >> 32)
return sgmIndex, chkStart
}
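// blockChunkRefRoundTrip is an illustrative sketch (hypothetical values, not
// used elsewhere): segment file index 3, chunk data starting at byte offset
// 0x1000 of that segment.
func blockChunkRefRoundTrip() (int, int) {
	ref := NewBlockChunkRef(3, 0x1000)
	// ref is 0x0000000300001000; Unpack reverses the shifts.
	return ref.Unpack() // 3, 0x1000
}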
// Meta holds information about one or more chunks.
// For examples of when chunks.Meta could refer to multiple chunks, see
// ChunkReader.ChunkOrIterable().
type Meta struct {
// Ref and Chunk hold either a reference that can be used to retrieve
// chunk data or the data itself.
// If Chunk is nil, call ChunkReader.ChunkOrIterable(Meta.Ref) to get the
// chunk and assign it to the Chunk field. If an iterable is returned from
// that method, then it may not be possible to set Chunk as the iterable
// might form several chunks.
Ref ChunkRef
Chunk chunkenc.Chunk
// Time range the data covers.
// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
MinTime, MaxTime int64
}
// ChunkFromSamples requires all samples to have the same type.
func ChunkFromSamples(s []Sample) (Meta, error) {
return ChunkFromSamplesGeneric(SampleSlice(s))
}
// ChunkFromSamplesGeneric requires all samples to have the same type.
func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
emptyChunk := Meta{Chunk: chunkenc.NewXORChunk()}
if s.Len() == 0 {
return emptyChunk, nil
}
mint, maxt := s.Get(0).T(), s.Get(s.Len()-1).T()
sampleType := s.Get(0).Type()
c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
if err != nil {
return Meta{}, err
}
ca, _ := c.Appender()
var newChunk chunkenc.Chunk
for i := 0; i < s.Len(); i++ {
switch sampleType {
case chunkenc.ValFloat:
ca.Append(s.Get(i).T(), s.Get(i).F())
case chunkenc.ValHistogram:
newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
if err != nil {
return emptyChunk, err
}
if newChunk != nil {
return emptyChunk, errors.New("did not expect to start a second chunk")
}
case chunkenc.ValFloatHistogram:
newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
if err != nil {
return emptyChunk, err
}
if newChunk != nil {
return emptyChunk, errors.New("did not expect to start a second chunk")
}
default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
}
}
return Meta{
MinTime: mint,
MaxTime: maxt,
Chunk: c,
}, nil
}
// ChunkMetasToSamples converts a slice of chunk meta data to a slice of samples.
// Used in tests to compare the content of chunks.
func ChunkMetasToSamples(chunks []Meta) (result []Sample) {
if len(chunks) == 0 {
return result
}
for _, chunk := range chunks {
it := chunk.Chunk.Iterator(nil)
for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
switch vt {
case chunkenc.ValFloat:
t, v := it.At()
result = append(result, sample{t: t, f: v})
case chunkenc.ValHistogram:
t, h := it.AtHistogram(nil)
result = append(result, sample{t: t, h: h})
case chunkenc.ValFloatHistogram:
t, fh := it.AtFloatHistogram(nil)
result = append(result, sample{t: t, fh: fh})
default:
panic("unexpected value type")
}
}
}
return result
}
// Iterator iterates over the chunks of a single time series.
type Iterator interface {
// At returns the current meta.
// It depends on the implementation whether the chunk is populated or not.
At() Meta
// Next advances the iterator by one.
Next() bool
// Err returns optional error if Next is false.
Err() error
}
// writeHash writes the chunk encoding and raw data into the provided hash.
func (cm *Meta) writeHash(h hash.Hash, buf []byte) error {
buf = append(buf[:0], byte(cm.Chunk.Encoding()))
if _, err := h.Write(buf[:1]); err != nil {
return err
}
if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
return err
}
return nil
}
// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
// The chunk itself is a closed interval [cm.MinTime, cm.MaxTime].
return cm.MinTime <= maxt && mint <= cm.MaxTime
}
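// For example, a chunk covering [5, 10] overlaps the interval [10, 20]:
// MinTime(5) <= maxt(20) and mint(10) <= MaxTime(10), since both ends are
// inclusive.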
var errInvalidSize = errors.New("invalid size")
var castagnoliTable *crc32.Table
func init() {
castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}
// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
return crc32.New(castagnoliTable)
}
// Check if the CRC of data matches that stored in sum, computed when the chunk was stored.
func checkCRC32(data, sum []byte) error {
got := crc32.Checksum(data, castagnoliTable)
// This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32.
want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3])
if got != want {
return fmt.Errorf("checksum mismatch expected:%x, actual:%x", want, got)
}
return nil
}
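// Note that the shift-combine above is just a big-endian decode of the 4-byte
// checksum; it is equivalent to:
//
//	want := binary.BigEndian.Uint32(sum)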
// Writer implements the ChunkWriter interface for the standard
// serialization format.
type Writer struct {
dirFile *os.File
files []*os.File
wbuf fileutil.BufWriter
n int64
crc32 hash.Hash
buf [binary.MaxVarintLen32]byte
segmentSize int64
useUncachedIO bool
}
const (
// DefaultChunkSegmentSize is the default chunks segment size.
DefaultChunkSegmentSize = 512 * 1024 * 1024
)
type writerOptions struct {
segmentSize int64
useUncachedIO bool
}
type WriterOption func(*writerOptions)
func WithUncachedIO(enabled bool) WriterOption {
return func(o *writerOptions) {
o.useUncachedIO = enabled
}
}
// WithSegmentSize sets the chunk segment size for the writer.
// Passing a value less than or equal to 0 causes the default segment size (DefaultChunkSegmentSize) to be used.
func WithSegmentSize(segmentSize int64) WriterOption {
return func(o *writerOptions) {
if segmentSize <= 0 {
segmentSize = DefaultChunkSegmentSize
}
o.segmentSize = segmentSize
}
}
// NewWriter returns a new writer against the given directory.
// It uses DefaultChunkSegmentSize as the default segment size.
func NewWriter(dir string, opts ...WriterOption) (*Writer, error) {
options := &writerOptions{
segmentSize: DefaultChunkSegmentSize,
}
for _, opt := range opts {
opt(options)
}
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
dirFile, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
return &Writer{
dirFile: dirFile,
n: 0,
crc32: newCRC32(),
segmentSize: options.segmentSize,
useUncachedIO: options.useUncachedIO,
}, nil
}
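// A hedged usage sketch of the options API above (the directory path and the
// 64 MiB segment size are hypothetical choices):
//
//	w, err := NewWriter("data/chunks", WithSegmentSize(64*1024*1024))
//	if err != nil {
//		return err
//	}
//	defer w.Close()
//	err = w.WriteChunks(metas...)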
func (w *Writer) tail() *os.File {
if len(w.files) == 0 {
return nil
}
return w.files[len(w.files)-1]
}
// finalizeTail writes all pending data to the current tail file if any,
// truncates its size, and closes it.
func (w *Writer) finalizeTail() error {
tf := w.tail()
if tf == nil {
return nil
}
if w.wbuf != nil {
if err := w.wbuf.Flush(); err != nil {
return err
}
}
if err := tf.Sync(); err != nil {
return err
}
// As the file was pre-allocated, we truncate any superfluous zero bytes.
off, err := tf.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err := tf.Truncate(off); err != nil {
return err
}
return tf.Close()
}
func (w *Writer) cut() error {
// Sync current tail to disk and close.
if err := w.finalizeTail(); err != nil {
return err
}
n, f, _, err := cutSegmentFile(w.dirFile, MagicChunks, chunksFormatV1, w.segmentSize)
if err != nil {
return err
}
w.n = int64(n)
w.files = append(w.files, f)
if w.wbuf != nil {
if err := w.wbuf.Reset(f); err != nil {
return err
}
} else {
var (
wbuf fileutil.BufWriter
err error
)
size := 8 * 1024 * 1024
if w.useUncachedIO {
// Uncached IO is implemented using direct I/O for now.
wbuf, err = fileutil.NewDirectIOWriter(f, size)
} else {
wbuf, err = fileutil.NewBufioWriterWithSize(f, size)
}
if err != nil {
return err
}
w.wbuf = wbuf
}
return nil
}
func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) {
p, seq, err := nextSequenceFile(dirFile.Name())
if err != nil {
return 0, nil, 0, fmt.Errorf("next sequence file: %w", err)
}
ptmp := p + ".tmp"
f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
if err != nil {
return 0, nil, 0, fmt.Errorf("open temp file: %w", err)
}
defer func() {
if returnErr != nil {
errs := tsdb_errors.NewMulti(returnErr)
if f != nil {
errs.Add(f.Close())
}
// Calling RemoveAll on a non-existent file does not return error.
errs.Add(os.RemoveAll(ptmp))
returnErr = errs.Err()
}
}()
if allocSize > 0 {
if err = fileutil.Preallocate(f, allocSize, true); err != nil {
return 0, nil, 0, fmt.Errorf("preallocate: %w", err)
}
}
if err = dirFile.Sync(); err != nil {
return 0, nil, 0, fmt.Errorf("sync directory: %w", err)
}
// Write header metadata for new file.
metab := make([]byte, SegmentHeaderSize)
binary.BigEndian.PutUint32(metab[:MagicChunksSize], magicNumber)
metab[4] = chunksFormat
n, err := f.Write(metab)
if err != nil {
return 0, nil, 0, fmt.Errorf("write header: %w", err)
}
if err := f.Close(); err != nil {
return 0, nil, 0, fmt.Errorf("close temp file: %w", err)
}
f = nil
if err := fileutil.Rename(ptmp, p); err != nil {
return 0, nil, 0, fmt.Errorf("replace file: %w", err)
}
f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
if err != nil {
return 0, nil, 0, fmt.Errorf("open final file: %w", err)
}
// Skip header for further writes.
offset := int64(n)
if _, err := f.Seek(offset, 0); err != nil {
return 0, nil, 0, fmt.Errorf("seek to %d in final file: %w", offset, err)
}
return n, f, seq, nil
}
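// cutSegmentFile above follows the common atomic-create pattern: the segment
// is created under a ".tmp" name, preallocated, written and closed, and only
// then renamed into place, so a crash never leaves a half-initialized segment
// under its final name. A minimal sketch of the same pattern (hypothetical
// names, error handling elided):
//
//	f, _ := os.OpenFile("000001.tmp", os.O_WRONLY|os.O_CREATE, 0o666)
//	f.Write(header)
//	f.Close()
//	fileutil.Rename("000001.tmp", "000001") // Visible atomically under the final name.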
func (w *Writer) write(b []byte) error {
n, err := w.wbuf.Write(b)
w.n += int64(n)
return err
}
// WriteChunks writes as many chunks as possible to the current segment,
// cuts a new segment when the current segment is full and
// writes the rest of the chunks in the new segment.
func (w *Writer) WriteChunks(chks ...Meta) error {
var (
batchSize = int64(0)
batchStart = 0
batches = make([][]Meta, 1)
batchID = 0
firstBatch = true
)
for i, chk := range chks {
// Each chunk contains: data length + encoding + the data itself + crc32
chkSize := int64(MaxChunkLengthFieldSize) // The data length is a variable length field so use the maximum possible value.
chkSize += ChunkEncodingSize // The chunk encoding.
chkSize += int64(len(chk.Chunk.Bytes())) // The data itself.
chkSize += crc32.Size // The 4 bytes of crc32.
batchSize += chkSize
// Cut a new batch when it is not the first chunk (to avoid empty segments) and
// the batch is too large to fit in the current segment.
cutNewBatch := (i != 0) && (batchSize+SegmentHeaderSize > w.segmentSize)
// If the segment already has some data then
// the first batch size calculation should account for that.
if firstBatch && w.n > SegmentHeaderSize {
cutNewBatch = batchSize+w.n > w.segmentSize
if cutNewBatch {
firstBatch = false
}
}
if cutNewBatch {
batchStart = i
batches = append(batches, []Meta{})
batchID++
batchSize = chkSize
}
batches[batchID] = chks[batchStart : i+1]
}
// Create a new segment when one doesn't already exist.
if w.n == 0 {
if err := w.cut(); err != nil {
return err
}
}
for i, chks := range batches {
if err := w.writeChunks(chks); err != nil {
return err
}
// Cut a new segment only when there are more chunks to write.
// Avoid creating a new empty segment at the end of the write.
if i < len(batches)-1 {
if err := w.cut(); err != nil {
return err
}
}
}
return nil
}
// writeChunks writes the chunks into the current segment irrespective
// of the configured segment size limit. A segment should have been already
// started before calling this.
func (w *Writer) writeChunks(chks []Meta) error {
if len(chks) == 0 {
return nil
}
seq := uint64(w.seq())
for i := range chks {
chk := &chks[i]
chk.Ref = ChunkRef(NewBlockChunkRef(seq, uint64(w.n)))
n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes())))
if err := w.write(w.buf[:n]); err != nil {
return err
}
w.buf[0] = byte(chk.Chunk.Encoding())
if err := w.write(w.buf[:1]); err != nil {
return err
}
if err := w.write(chk.Chunk.Bytes()); err != nil {
return err
}
w.crc32.Reset()
if err := chk.writeHash(w.crc32, w.buf[:]); err != nil {
return err
}
if err := w.write(w.crc32.Sum(w.buf[:0])); err != nil {
return err
}
}
return nil
}
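// For reference, each iteration of the loop above lays a chunk out on disk as:
//
//	┌───────────────┬───────────────────┬──────────────┬─────────────────┐
//	│ len <uvarint> │ encoding <1 byte> │ data <bytes> │ CRC32 <4 bytes> │
//	└───────────────┴───────────────────┴──────────────┴─────────────────┘
//
// The CRC32 covers the encoding byte and the data (see writeHash) but not
// the length field.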
func (w *Writer) seq() int {
return len(w.files) - 1
}
func (w *Writer) Close() error {
if err := w.finalizeTail(); err != nil {
return err
}
// Close the dir file; if it is left open, the rename will fail on Windows.
return w.dirFile.Close()
}
// ByteSlice abstracts a byte slice.
type ByteSlice interface {
Len() int
Range(start, end int) []byte
}
type realByteSlice []byte
func (b realByteSlice) Len() int {
return len(b)
}
func (b realByteSlice) Range(start, end int) []byte {
return b[start:end]
}
// Reader implements a ChunkReader for a serialized byte stream
// of series data.
type Reader struct {
// The underlying bytes holding the encoded series data.
// Each slice holds the data for a different segment.
bs []ByteSlice
cs []io.Closer // Closers for resources behind the byte slices.
size int64 // The total size of bytes in the reader.
pool chunkenc.Pool
}
func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
cr := Reader{pool: pool, bs: bs, cs: cs}
for i, b := range cr.bs {
if b.Len() < SegmentHeaderSize {
return nil, fmt.Errorf("invalid segment header in segment %d: %w", i, errInvalidSize)
}
// Verify magic number.
if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks {
return nil, fmt.Errorf("invalid magic number %x", m)
}
// Verify chunk format version.
if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
return nil, fmt.Errorf("invalid chunk format version %d", v)
}
cr.size += int64(b.Len())
}
return &cr, nil
}
// NewDirReader returns a new Reader against sequentially numbered files in the
// given directory.
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
files, err := sequenceFiles(dir)
if err != nil {
return nil, err
}
if pool == nil {
pool = chunkenc.NewPool()
}
var (
bs []ByteSlice
cs []io.Closer
)
for _, fn := range files {
f, err := fileutil.OpenMmapFile(fn)
if err != nil {
return nil, tsdb_errors.NewMulti(
fmt.Errorf("mmap files: %w", err),
tsdb_errors.CloseAll(cs),
).Err()
}
cs = append(cs, f)
bs = append(bs, realByteSlice(f.Bytes()))
}
reader, err := newReader(bs, cs, pool)
if err != nil {
return nil, tsdb_errors.NewMulti(
err,
tsdb_errors.CloseAll(cs),
).Err()
}
return reader, nil
}
func (s *Reader) Close() error {
return tsdb_errors.CloseAll(s.cs)
}
// Size returns the size of the chunks.
func (s *Reader) Size() int64 {
return s.size
}
// ChunkOrIterable returns a chunk from a given reference.
func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack()
if sgmIndex >= len(s.bs) {
return nil, nil, fmt.Errorf("segment index %d out of range", sgmIndex)
}
sgmBytes := s.bs[sgmIndex]
if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() {
return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
}
// With the minimum chunk length this should never cause us to read
// beyond the end of the slice.
c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize)
chkDataLen, n := binary.Uvarint(c)
if n <= 0 {
return nil, nil, fmt.Errorf("reading chunk length failed with %d", n)
}
chkEncStart := chkStart + n
chkEnd := chkEncStart + ChunkEncodingSize + int(chkDataLen) + crc32.Size
chkDataStart := chkEncStart + ChunkEncodingSize
chkDataEnd := chkEnd - crc32.Size
if chkEnd > sgmBytes.Len() {
return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
}
sum := sgmBytes.Range(chkDataEnd, chkEnd)
if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil {
return nil, nil, err
}
chkData := sgmBytes.Range(chkDataStart, chkDataEnd)
chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0]
chk, err := s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
return chk, nil, err
}
func nextSequenceFile(dir string) (string, int, error) {
files, err := os.ReadDir(dir)
if err != nil {
return "", 0, err
}
i := uint64(0)
for _, f := range files {
j, err := strconv.ParseUint(f.Name(), 10, 64)
if err != nil {
continue
}
// The files are not necessarily found in numeric order: lexically,
// "1000000" sorts before "200000", for example. Though this is a very
// rare case, we check for the max id anyway.
if j > i {
i = j
}
}
return segmentFile(dir, int(i+1)), int(i + 1), nil
}
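// For example, given existing files "000001" and "000005" (non-numeric names
// are ignored), nextSequenceFile returns ("<dir>/000006", 6).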
func segmentFile(baseDir string, index int) string {
return filepath.Join(baseDir, fmt.Sprintf("%0.6d", index))
}
func sequenceFiles(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |