repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/chunk_write_queue_test.go | tsdb/chunks/chunk_write_queue_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import (
"errors"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) {
var blockWriterWg sync.WaitGroup
blockWriterWg.Add(1)
// blockingChunkWriter blocks until blockWriterWg is done.
blockingChunkWriter := func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error {
blockWriterWg.Wait()
return nil
}
q := newChunkWriteQueue(nil, 1000, blockingChunkWriter)
defer q.stop()
defer blockWriterWg.Done()
testChunk := chunkenc.NewXORChunk()
var ref ChunkDiskMapperRef
job := chunkWriteJob{
chk: testChunk,
ref: ref,
}
require.NoError(t, q.addJob(job))
// Retrieve chunk from the queue.
gotChunk := q.get(ref)
require.Equal(t, testChunk, gotChunk)
}
func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
var (
gotSeriesRef HeadSeriesRef
gotMint, gotMaxt int64
gotChunk chunkenc.Chunk
gotRef ChunkDiskMapperRef
gotCutFile bool
)
chunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, _, cutFile bool) error {
gotSeriesRef = seriesRef
gotMint = mint
gotMaxt = maxt
gotChunk = chunk
gotRef = ref
gotCutFile = cutFile
return nil
}
q := newChunkWriteQueue(nil, 1000, chunkWriter)
defer q.stop()
seriesRef := HeadSeriesRef(1)
var mint, maxt int64 = 2, 3
chunk := chunkenc.NewXORChunk()
ref := newChunkDiskMapperRef(321, 123)
cutFile := true
awaitCb := make(chan struct{})
require.NoError(t, q.addJob(chunkWriteJob{seriesRef: seriesRef, mint: mint, maxt: maxt, chk: chunk, ref: ref, cutFile: cutFile, callback: func(error) {
close(awaitCb)
}}))
<-awaitCb
// Check that the write function received all job attributes correctly.
require.Equal(t, seriesRef, gotSeriesRef)
require.Equal(t, mint, gotMint)
require.Equal(t, maxt, gotMaxt)
require.Equal(t, chunk, gotChunk)
require.Equal(t, ref, gotRef)
require.Equal(t, cutFile, gotCutFile)
}
func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
sizeLimit := 100
unblockChunkWriterCh := make(chan struct{}, sizeLimit)
// blockingChunkWriter blocks until a value can be received from unblockChunkWriterCh.
blockingChunkWriter := func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error {
<-unblockChunkWriterCh
return nil
}
q := newChunkWriteQueue(nil, sizeLimit, blockingChunkWriter)
defer q.stop()
// Unblock writers when shutting down.
defer close(unblockChunkWriterCh)
var chunkRef ChunkDiskMapperRef
var callbackWg sync.WaitGroup
addChunk := func() {
callbackWg.Add(1)
require.NoError(t, q.addJob(chunkWriteJob{
ref: chunkRef,
callback: func(error) {
callbackWg.Done()
},
}))
chunkRef++
}
unblockChunkWriter := func() {
unblockChunkWriterCh <- struct{}{}
}
// Fill the queue to the middle of the size limit.
for job := 0; job < sizeLimit/2; job++ {
addChunk()
}
// Consume the jobs.
for job := 0; job < sizeLimit/2; job++ {
unblockChunkWriter()
}
// Add jobs until the queue is full.
// Note that one more job than <sizeLimit> can be added because one job will already have been
// picked up by the worker, which then blocks on the chunk write function.
for job := 0; job < sizeLimit+1; job++ {
addChunk()
}
// The queue should be full.
require.True(t, q.queueIsFull())
// Adding another job should block as long as no job from the queue gets consumed.
addedJob := atomic.NewBool(false)
go func() {
addChunk()
addedJob.Store(true)
}()
// Wait for 10ms while the adding of a new job is blocked.
time.Sleep(time.Millisecond * 10)
require.False(t, addedJob.Load())
// Consume one job from the queue.
unblockChunkWriter()
// Wait until the job has been added to the queue.
require.Eventually(t, func() bool { return addedJob.Load() }, time.Second, time.Millisecond*10)
// The queue should be full again.
require.True(t, q.queueIsFull())
// Consume <sizeLimit>+1 jobs from the queue.
// To drain the queue we need to consume <sizeLimit>+1 jobs because one job
// is already being processed.
for job := 0; job < sizeLimit+1; job++ {
require.False(t, q.queueIsEmpty())
unblockChunkWriter()
}
// Wait until all jobs have been processed.
callbackWg.Wait()
require.Eventually(t, q.queueIsEmpty, 500*time.Millisecond, 50*time.Millisecond)
}
func TestChunkWriteQueue_HandlerErrorViaCallback(t *testing.T) {
testError := errors.New("test error")
chunkWriter := func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error {
return testError
}
awaitCb := make(chan struct{})
var gotError error
callback := func(err error) {
gotError = err
close(awaitCb)
}
q := newChunkWriteQueue(nil, 1, chunkWriter)
defer q.stop()
job := chunkWriteJob{callback: callback}
require.NoError(t, q.addJob(job))
<-awaitCb
require.Equal(t, testError, gotError)
}
func BenchmarkChunkWriteQueue_addJob(b *testing.B) {
for _, withReads := range []bool{false, true} {
b.Run(fmt.Sprintf("with reads %t", withReads), func(b *testing.B) {
for _, concurrentWrites := range []int{1, 10, 100, 1000} {
b.Run(fmt.Sprintf("%d concurrent writes", concurrentWrites), func(b *testing.B) {
issueReadSignal := make(chan struct{})
q := newChunkWriteQueue(nil, 1000, func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error {
if withReads {
select {
case issueReadSignal <- struct{}{}:
default:
// Can't write to issueReadSignal; don't block, just skip the read.
}
}
return nil
})
b.Cleanup(func() {
// Stopped already, so no more writes will happen.
close(issueReadSignal)
})
b.Cleanup(q.stop)
start := sync.WaitGroup{}
start.Add(1)
jobs := make(chan chunkWriteJob, b.N)
for i := 0; b.Loop(); i++ {
jobs <- chunkWriteJob{
seriesRef: HeadSeriesRef(i),
ref: ChunkDiskMapperRef(i),
}
}
close(jobs)
go func() {
for range issueReadSignal {
// We don't care about the ID we're getting, we just want to grab the lock.
_ = q.get(ChunkDiskMapperRef(0))
}
}()
done := sync.WaitGroup{}
done.Add(concurrentWrites)
for range concurrentWrites {
go func() {
start.Wait()
for j := range jobs {
_ = q.addJob(j)
}
done.Done()
}()
}
b.ResetTimer()
start.Done()
done.Wait()
})
}
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunks/queue.go | tsdb/chunks/queue.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
import "sync"
// writeJobQueue is similar to a buffered channel of chunkWriteJob, but manages its own buffers
// to avoid using a lot of memory when it's empty. It does that by storing elements in segments
// of equal size (segmentSize). When a segment is no longer used, references to it are removed,
// so it can be garbage collected.
type writeJobQueue struct {
maxSize int
segmentSize int
mtx sync.Mutex // protects all following variables
pushed, popped *sync.Cond // signalled when something is pushed into the queue or popped from it
first, last *writeJobQueueSegment // pointer to first and last segment, if any
size int // total size of the queue
closed bool // after closing the queue, nothing can be pushed to it
}
type writeJobQueueSegment struct {
segment []chunkWriteJob
nextRead, nextWrite int // index of next read and next write in this segment.
nextSegment *writeJobQueueSegment // next segment, if any
}
func newWriteJobQueue(maxSize, segmentSize int) *writeJobQueue {
if maxSize <= 0 || segmentSize <= 0 {
panic("invalid queue")
}
q := &writeJobQueue{
maxSize: maxSize,
segmentSize: segmentSize,
}
q.pushed = sync.NewCond(&q.mtx)
q.popped = sync.NewCond(&q.mtx)
return q
}
func (q *writeJobQueue) close() {
q.mtx.Lock()
defer q.mtx.Unlock()
q.closed = true
// Unblock all blocked goroutines.
q.pushed.Broadcast()
q.popped.Broadcast()
}
// push blocks until there is space available in the queue, and then adds the job to the queue.
// If the queue is closed, or gets closed while waiting for space, push returns false.
func (q *writeJobQueue) push(job chunkWriteJob) bool {
q.mtx.Lock()
defer q.mtx.Unlock()
// Wait until queue has more space or is closed.
for !q.closed && q.size >= q.maxSize {
q.popped.Wait()
}
if q.closed {
return false
}
// Check if the last segment has more space for writing, and create a new one if not.
if q.last == nil || q.last.nextWrite >= q.segmentSize {
prevLast := q.last
q.last = &writeJobQueueSegment{
segment: make([]chunkWriteJob, q.segmentSize),
}
if prevLast != nil {
prevLast.nextSegment = q.last
}
if q.first == nil {
q.first = q.last
}
}
q.last.segment[q.last.nextWrite] = job
q.last.nextWrite++
q.size++
q.pushed.Signal()
return true
}
// pop returns the first job from the queue, and true.
// If the queue is empty, pop blocks until there is a job (returns true), or until the queue is closed (returns false).
// If the queue was already closed, pop first returns all remaining elements from the queue (with true), and only then returns false.
func (q *writeJobQueue) pop() (chunkWriteJob, bool) {
q.mtx.Lock()
defer q.mtx.Unlock()
// wait until something is pushed to the queue, or queue is closed.
for q.size == 0 {
if q.closed {
return chunkWriteJob{}, false
}
q.pushed.Wait()
}
res := q.first.segment[q.first.nextRead]
q.first.segment[q.first.nextRead] = chunkWriteJob{} // clear just-read element
q.first.nextRead++
q.size--
// If we have read all possible elements from first segment, we can drop it.
if q.first.nextRead >= q.segmentSize {
q.first = q.first.nextSegment
if q.first == nil {
q.last = nil
}
}
q.popped.Signal()
return res, true
}
// length returns the number of jobs in the queue.
func (q *writeJobQueue) length() int {
q.mtx.Lock()
defer q.mtx.Unlock()
return q.size
}
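// Hypothetical usage sketch, not part of the original file: a producer pushes
// a handful of jobs into a small segmented queue while a consumer pops them,
// and close() lets the consumer drain the remainder and then stop. The sizes
// and job values are illustrative only.
func writeJobQueueSketch() {
	q := newWriteJobQueue(10, 4) // maxSize 10, segmentSize 4.
	go func() {
		for i := 0; i < 6; i++ {
			q.push(chunkWriteJob{seriesRef: HeadSeriesRef(i)})
		}
		q.close() // pop keeps returning queued jobs, then reports false.
	}()
	for {
		job, ok := q.pop()
		if !ok {
			return // Queue closed and fully drained.
		}
		_ = job // Process the job here.
	}
}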
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tsdbutil/dir_locker_test.go | tsdb/tsdbutil/dir_locker_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdbutil
import (
"testing"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/util/testutil"
)
func TestLockfile(t *testing.T) {
TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*DirLocker, testutil.Closer) {
locker, err := NewDirLocker(data, "tsdbutil", promslog.NewNopLogger(), nil)
require.NoError(t, err)
if createLock {
require.NoError(t, locker.Lock())
}
return locker, testutil.NewCallbackCloser(func() {
require.NoError(t, locker.Release())
})
})
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tsdbutil/dir_locker_testutil.go | tsdb/tsdbutil/dir_locker_testutil.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdbutil
import (
"fmt"
"os"
"testing"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/util/testutil"
)
// TestDirLockerUsage performs a set of tests which guarantee correct usage of
// DirLocker. open should use data as the storage directory, and createLock
// to determine if a lock file should be used.
func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, createLock bool) (*DirLocker, testutil.Closer)) {
t.Helper()
cases := []struct {
fileAlreadyExists bool
lockFileDisabled bool
expectedValue int
}{
{
fileAlreadyExists: false,
lockFileDisabled: false,
expectedValue: lockfileCreatedCleanly,
},
{
fileAlreadyExists: true,
lockFileDisabled: false,
expectedValue: lockfileReplaced,
},
{
fileAlreadyExists: true,
lockFileDisabled: true,
expectedValue: lockfileDisabled,
},
{
fileAlreadyExists: false,
lockFileDisabled: true,
expectedValue: lockfileDisabled,
},
}
for _, c := range cases {
t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) {
tmpdir := t.TempDir()
// Test preconditions (file already exists + lockfile option)
if c.fileAlreadyExists {
tmpLocker, err := NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil)
require.NoError(t, err)
err = os.WriteFile(tmpLocker.path, []byte{}, 0o644)
require.NoError(t, err)
}
locker, closer := open(t, tmpdir, !c.lockFileDisabled)
require.Equal(t, float64(c.expectedValue), prom_testutil.ToFloat64(locker.createdCleanly))
// Close the client. This should delete the lockfile.
closer.Close()
// Check that the lockfile is always deleted
if !c.lockFileDisabled {
_, err := os.Stat(locker.path)
require.True(t, os.IsNotExist(err), "lockfile was not deleted")
}
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tsdbutil/dir_locker.go | tsdb/tsdbutil/dir_locker.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdbutil
import (
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"github.com/prometheus/client_golang/prometheus"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
const (
lockfileDisabled = -1
lockfileReplaced = 0
lockfileCreatedCleanly = 1
)
type DirLocker struct {
logger *slog.Logger
createdCleanly prometheus.Gauge
releaser fileutil.Releaser
path string
}
// NewDirLocker creates a DirLocker that can obtain an exclusive lock on dir.
func NewDirLocker(dir, subsystem string, l *slog.Logger, r prometheus.Registerer) (*DirLocker, error) {
lock := &DirLocker{
logger: l,
createdCleanly: prometheus.NewGauge(prometheus.GaugeOpts{
Name: fmt.Sprintf("prometheus_%s_clean_start", subsystem),
Help: "-1: lockfile is disabled. 0: a lockfile from a previous execution was replaced. 1: lockfile creation was clean",
}),
}
if r != nil {
r.MustRegister(lock.createdCleanly)
}
lock.createdCleanly.Set(lockfileDisabled)
absdir, err := filepath.Abs(dir)
if err != nil {
return nil, err
}
lock.path = filepath.Join(absdir, "lock")
return lock, nil
}
// Lock obtains the lock on the locker directory.
func (l *DirLocker) Lock() error {
if l.releaser != nil {
return errors.New("DB lock already obtained")
}
if _, err := os.Stat(l.path); err == nil {
l.logger.Warn("A lockfile from a previous execution already existed. It was replaced", "file", l.path)
l.createdCleanly.Set(lockfileReplaced)
} else {
l.createdCleanly.Set(lockfileCreatedCleanly)
}
lockf, _, err := fileutil.Flock(l.path)
if err != nil {
return fmt.Errorf("lock DB directory: %w", err)
}
l.releaser = lockf
return nil
}
// Release releases the lock. No-op if the lock is not held.
func (l *DirLocker) Release() error {
if l.releaser == nil {
return nil
}
errs := tsdb_errors.NewMulti()
errs.Add(l.releaser.Release())
errs.Add(os.Remove(l.path))
l.releaser = nil
return errs.Err()
}
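// Hypothetical usage sketch, not part of the original file: acquire an
// exclusive lock on a data directory, do some work, and release the lock.
// The subsystem name and the fresh registry are illustrative assumptions.
func dirLockerSketch(dir string, logger *slog.Logger) error {
	locker, err := NewDirLocker(dir, "example", logger, prometheus.NewRegistry())
	if err != nil {
		return err
	}
	if err := locker.Lock(); err != nil {
		return err
	}
	defer func() {
		_ = locker.Release() // Also removes the lockfile from disk.
	}()
	// ... exclusive access to dir ...
	return nil
}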
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tsdbutil/histogram.go | tsdb/tsdbutil/histogram.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdbutil
import (
"math"
"github.com/prometheus/prometheus/model/histogram"
)
func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
for i := range n {
h := GenerateTestHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
r = append(r, h)
}
return r
}
func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram {
h := GenerateTestHistogram(int64(n))
h.CounterResetHint = hint
return h
}
// GenerateTestHistogram generates a test histogram, but it is up to the user to set any known counter reset hint.
func GenerateTestHistogram(i int64) *histogram.Histogram {
return &histogram.Histogram{
Count: 12 + uint64(i*9),
ZeroCount: 2 + uint64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{i + 1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{i + 1, 1, -1, 0},
}
}
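// Hypothetical sketch, not part of the original file: integer histogram
// buckets are delta-encoded, so absolute counts are the running sum of the
// deltas. For GenerateTestHistogram(0), PositiveBuckets{1, 1, -1, 0} expands
// to the absolute counts {1, 2, 1, 1} (compare GenerateTestFloatHistogram
// below, which stores absolute values directly).
func expandBucketDeltasSketch(deltas []int64) []int64 {
	abs := make([]int64, 0, len(deltas))
	var cur int64
	for _, d := range deltas {
		cur += d // Each stored value is a delta to the previous bucket count.
		abs = append(abs, cur)
	}
	return abs
}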
func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) {
for i := range n {
h := GenerateTestCustomBucketsHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
r = append(r, h)
}
return r
}
func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram {
return &histogram.Histogram{
Count: 5 + uint64(i*4),
Sum: 18.4 * float64(i+1),
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{i + 1, 1, -1, 0},
CustomValues: []float64{0, 1, 2, 3, 4},
}
}
func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
for x := range n {
i := int64(math.Sin(float64(x))*100) + 100
r = append(r, GenerateTestGaugeHistogram(i))
}
return r
}
func GenerateTestGaugeHistogram(i int64) *histogram.Histogram {
h := GenerateTestHistogram(i)
h.CounterResetHint = histogram.GaugeType
return h
}
func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for i := range n {
h := GenerateTestFloatHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
r = append(r, h)
}
return r
}
// GenerateTestFloatHistogram generates a test float histogram, but it is up to the user to set any known counter reset hint.
func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 12 + float64(i*9),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
}
}
func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for i := range n {
h := GenerateTestCustomBucketsFloatHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
r = append(r, h)
}
return r
}
func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 5 + float64(i*4),
Sum: 18.4 * float64(i+1),
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
CustomValues: []float64{0, 1, 2, 3, 4},
}
}
func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for x := range n {
i := int64(math.Sin(float64(x))*100) + 100
r = append(r, GenerateTestGaugeFloatHistogram(i))
}
return r
}
func GenerateTestGaugeFloatHistogram(i int64) *histogram.FloatHistogram {
h := GenerateTestFloatHistogram(i)
h.CounterResetHint = histogram.GaugeType
return h
}
func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}
func SetHistogramCounterReset(h *histogram.Histogram) *histogram.Histogram {
h.CounterResetHint = histogram.CounterReset
return h
}
func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}
func SetFloatHistogramCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram {
h.CounterResetHint = histogram.CounterReset
return h
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/bstream.go | tsdb/chunkenc/bstream.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It received minor modifications to suit Prometheus's needs.
// Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package chunkenc
import (
"encoding/binary"
"io"
)
// bstream is a stream of bits.
type bstream struct {
stream []byte // The data stream.
count uint8 // How many right-most bits are available for writing in the current byte (the last byte of the stream).
}
// Reset resets b around stream.
func (b *bstream) Reset(stream []byte) {
b.stream = stream
b.count = 0
}
func (b *bstream) bytes() []byte {
return b.stream
}
type bit bool
const (
zero bit = false
one bit = true
)
func (b *bstream) writeBit(bit bit) {
if b.count == 0 {
b.stream = append(b.stream, 0)
b.count = 8
}
i := len(b.stream) - 1
if bit {
b.stream[i] |= 1 << (b.count - 1)
}
b.count--
}
func (b *bstream) writeByte(byt byte) {
if b.count == 0 {
b.stream = append(b.stream, byt)
return
}
i := len(b.stream) - 1
// Complete the last byte with the leftmost b.count bits from byt.
b.stream[i] |= byt >> (8 - b.count)
// Write the remainder, if any.
b.stream = append(b.stream, byt<<b.count)
}
// writeBits writes the nbits right-most bits of u to the stream
// in left-to-right order.
func (b *bstream) writeBits(u uint64, nbits int) {
u <<= 64 - uint(nbits)
for nbits >= 8 {
byt := byte(u >> 56)
b.writeByte(byt)
u <<= 8
nbits -= 8
}
for nbits > 0 {
b.writeBit((u >> 63) == 1)
u <<= 1
nbits--
}
}
type bstreamReader struct {
stream []byte
streamOffset int // The offset of the next byte to read from the stream.
buffer uint64 // The current buffer, filled from the stream, containing up to 8 bytes from which bits are read.
valid uint8 // The number of right-most bits valid to read (from left) in the current 8-byte buffer.
last byte // A copy of the last byte of the stream.
}
func newBReader(b []byte) bstreamReader {
// The last byte of the stream can be updated later, so we take a copy.
var last byte
if len(b) > 0 {
last = b[len(b)-1]
}
return bstreamReader{
stream: b,
last: last,
}
}
func (b *bstreamReader) readBit() (bit, error) {
if b.valid == 0 {
if !b.loadNextBuffer(1) {
return false, io.EOF
}
}
return b.readBitFast()
}
// readBitFast is like readBit but can return io.EOF if the internal buffer is empty.
// If it returns io.EOF, the caller should retry reading bits calling readBit().
// This function must be kept small and a leaf in order to help the compiler inline it
// and further improve performance.
func (b *bstreamReader) readBitFast() (bit, error) {
if b.valid == 0 {
return false, io.EOF
}
b.valid--
bitmask := uint64(1) << b.valid
return (b.buffer & bitmask) != 0, nil
}
// readBits constructs a uint64 with the nbits right-most bits
// read from the stream, and any other bits 0.
func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
if b.valid == 0 {
if !b.loadNextBuffer(nbits) {
return 0, io.EOF
}
}
if nbits <= b.valid {
return b.readBitsFast(nbits)
}
// We have to read all remaining valid bits from the current buffer and a part from the next one.
bitmask := (uint64(1) << b.valid) - 1
nbits -= b.valid
v := (b.buffer & bitmask) << nbits
b.valid = 0
if !b.loadNextBuffer(nbits) {
return 0, io.EOF
}
bitmask = (uint64(1) << nbits) - 1
v |= ((b.buffer >> (b.valid - nbits)) & bitmask)
b.valid -= nbits
return v, nil
}
// readBitsFast is like readBits but can return io.EOF if the internal buffer is empty.
// If it returns io.EOF, the caller should retry reading bits calling readBits().
// This function must be kept small and a leaf in order to help the compiler inline it
// and further improve performance.
func (b *bstreamReader) readBitsFast(nbits uint8) (uint64, error) {
if nbits > b.valid {
return 0, io.EOF
}
bitmask := (uint64(1) << nbits) - 1
b.valid -= nbits
return (b.buffer >> b.valid) & bitmask, nil
}
func (b *bstreamReader) ReadByte() (byte, error) {
v, err := b.readBits(8)
if err != nil {
return 0, err
}
return byte(v), nil
}
// loadNextBuffer loads the next bytes from the stream into the internal buffer.
// The input nbits is the minimum number of bits that must be read, but the implementation
// can read more (if possible) to improve performance.
func (b *bstreamReader) loadNextBuffer(nbits uint8) bool {
if b.streamOffset >= len(b.stream) {
return false
}
// Handle the case where there are more than 8 bytes left in the stream (the
// most common case) in an optimized way. It's guaranteed that this branch
// will never read from the very last byte of the stream (which suffers race
// conditions due to concurrent writes).
if b.streamOffset+8 < len(b.stream) {
b.buffer = binary.BigEndian.Uint64(b.stream[b.streamOffset:])
b.streamOffset += 8
b.valid = 64
return true
}
// We're here if there are 8 or fewer bytes left in the stream.
// The following code is slower but called less frequently.
nbytes := int((nbits / 8) + 1)
if b.streamOffset+nbytes > len(b.stream) {
nbytes = len(b.stream) - b.streamOffset
}
buffer := uint64(0)
skip := 0
if b.streamOffset+nbytes == len(b.stream) {
// There can be concurrent writes happening on the very last byte
// of the stream, so use the copy we took at initialization time.
buffer |= uint64(b.last)
// Read up to the byte before the last one.
skip = 1
}
for i := 0; i < nbytes-skip; i++ {
buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
}
b.buffer = buffer
b.streamOffset += nbytes
b.valid = uint8(nbytes * 8)
return true
}
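// Hypothetical round-trip sketch, not part of the original file: write a full
// byte, a single bit, and a 5-bit value, then read them back in order. The
// expected values are noted in the comments.
func bstreamRoundTripSketch() (byte, bit, uint64) {
	var b bstream
	b.writeByte(0xAB)
	b.writeBit(one)
	b.writeBits(0b10110, 5)
	r := newBReader(b.bytes())
	byt, _ := r.ReadByte() // 0xAB
	bi, _ := r.readBit()   // one
	v, _ := r.readBits(5)  // 0b10110
	return byt, bi, v
}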
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/chunk_test.go | tsdb/chunkenc/chunk_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"errors"
"fmt"
"io"
"math/rand"
"testing"
"github.com/stretchr/testify/require"
)
type pair struct {
t int64
v float64
}
func TestChunk(t *testing.T) {
for enc, nc := range map[Encoding]func() Chunk{
EncXOR: func() Chunk { return NewXORChunk() },
} {
t.Run(fmt.Sprintf("%v", enc), func(t *testing.T) {
for range make([]struct{}, 1) {
c := nc()
testChunk(t, c)
}
})
}
}
func testChunk(t *testing.T, c Chunk) {
app, err := c.Appender()
require.NoError(t, err)
var exp []pair
var (
ts = int64(1234123324)
v = 1243535.123
)
for i := range 300 {
ts += int64(rand.Intn(10000) + 1)
if i%2 == 0 {
v += float64(rand.Intn(1000000))
} else {
v -= float64(rand.Intn(1000000))
}
// Start with a new appender every 10th sample. This emulates starting
// appending to a partially filled chunk.
if i%10 == 0 {
app, err = c.Appender()
require.NoError(t, err)
}
app.Append(ts, v)
exp = append(exp, pair{t: ts, v: v})
}
// 1. Expand iterator in simple case.
it1 := c.Iterator(nil)
var res1 []pair
for it1.Next() == ValFloat {
ts, v := it1.At()
res1 = append(res1, pair{t: ts, v: v})
}
require.NoError(t, it1.Err())
require.Equal(t, exp, res1)
// 2. Expand second iterator while reusing first one.
it2 := c.Iterator(it1)
var res2 []pair
for it2.Next() == ValFloat {
ts, v := it2.At()
res2 = append(res2, pair{t: ts, v: v})
}
require.NoError(t, it2.Err())
require.Equal(t, exp, res2)
// 3. Test iterator Seek.
mid := len(exp) / 2
it3 := c.Iterator(nil)
var res3 []pair
require.Equal(t, ValFloat, it3.Seek(exp[mid].t))
// Below ones should not matter.
require.Equal(t, ValFloat, it3.Seek(exp[mid].t))
require.Equal(t, ValFloat, it3.Seek(exp[mid].t))
ts, v = it3.At()
res3 = append(res3, pair{t: ts, v: v})
for it3.Next() == ValFloat {
ts, v := it3.At()
res3 = append(res3, pair{t: ts, v: v})
}
require.NoError(t, it3.Err())
require.Equal(t, exp[mid:], res3)
require.Equal(t, ValNone, it3.Seek(exp[len(exp)-1].t+1))
}
func TestPool(t *testing.T) {
p := NewPool()
for _, tc := range []struct {
name string
encoding Encoding
expErr error
}{
{
name: "xor",
encoding: EncXOR,
},
{
name: "histogram",
encoding: EncHistogram,
},
{
name: "float histogram",
encoding: EncFloatHistogram,
},
{
name: "invalid encoding",
encoding: EncNone,
expErr: errors.New(`invalid chunk encoding "none"`),
},
} {
t.Run(tc.name, func(t *testing.T) {
c, err := p.Get(tc.encoding, []byte("test"))
if tc.expErr != nil {
require.EqualError(t, err, tc.expErr.Error())
return
}
require.NoError(t, err)
var b *bstream
switch tc.encoding {
case EncHistogram:
b = &c.(*HistogramChunk).b
case EncFloatHistogram:
b = &c.(*FloatHistogramChunk).b
default:
b = &c.(*XORChunk).b
}
require.Equal(t, &bstream{
stream: []byte("test"),
count: 0,
}, b)
b.count = 1
require.NoError(t, p.Put(c))
require.Equal(t, &bstream{
stream: nil,
count: 0,
}, b)
})
}
t.Run("put bad chunk wrapper", func(t *testing.T) {
// When a wrapping chunk poses as an encoding it can't be converted to, Put should skip it.
c := fakeChunk{
encoding: EncXOR,
t: t,
}
require.NoError(t, p.Put(c))
})
t.Run("put invalid encoding", func(t *testing.T) {
c := fakeChunk{
encoding: EncNone,
t: t,
}
require.EqualError(t, p.Put(c), `invalid chunk encoding "none"`)
})
}
type fakeChunk struct {
Chunk
encoding Encoding
t *testing.T
}
func (c fakeChunk) Encoding() Encoding {
return c.encoding
}
func (c fakeChunk) Reset([]byte) {
c.t.Fatal("Reset should not be called")
}
func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
const samplesPerChunk = 250
var (
t = int64(1234123324)
v = 1243535.123
exp []pair
)
for range samplesPerChunk {
// t += int64(rand.Intn(10000) + 1)
t += int64(1000)
// v = rand.Float64()
v += float64(100)
exp = append(exp, pair{t: t, v: v})
}
chunk := newChunk()
{
a, err := chunk.Appender()
if err != nil {
b.Fatalf("get appender: %s", err)
}
j := 0
for _, p := range exp {
if j > 250 {
break
}
a.Append(p.t, p.v)
j++
}
}
b.ReportAllocs()
var res float64
var it Iterator
for i := 0; b.Loop(); {
it := chunk.Iterator(it)
for it.Next() == ValFloat {
_, v := it.At()
res = v
i++
}
if err := it.Err(); err != nil && !errors.Is(err, io.EOF) {
require.NoError(b, err)
}
_ = res
}
}
func newXORChunk() Chunk {
return NewXORChunk()
}
func BenchmarkXORIterator(b *testing.B) {
benchmarkIterator(b, newXORChunk)
}
func BenchmarkXORAppender(b *testing.B) {
r := rand.New(rand.NewSource(1))
b.Run("constant", func(b *testing.B) {
benchmarkAppender(b, func() (int64, float64) {
return 1000, 0
}, newXORChunk)
})
b.Run("random steps", func(b *testing.B) {
benchmarkAppender(b, func() (int64, float64) {
return int64(r.Intn(100) - 50 + 15000), // 15 seconds +- up to 100ms of jitter.
float64(r.Intn(100) - 50) // Varying from -50 to +50 in 100 discrete steps.
}, newXORChunk)
})
b.Run("random 0-1", func(b *testing.B) {
benchmarkAppender(b, func() (int64, float64) {
return int64(r.Intn(100) - 50 + 15000), // 15 seconds +- up to 100ms of jitter.
r.Float64() // Random between 0 and 1.0.
}, newXORChunk)
})
}
func benchmarkAppender(b *testing.B, deltas func() (int64, float64), newChunk func() Chunk) {
var (
t = int64(1234123324)
v = 1243535.123
)
const nSamples = 120 // Same as tsdb.DefaultSamplesPerChunk.
var exp []pair
for range nSamples {
dt, dv := deltas()
t += dt
v += dv
exp = append(exp, pair{t: t, v: v})
}
b.ReportAllocs()
for b.Loop() {
c := newChunk()
a, err := c.Appender()
if err != nil {
b.Fatalf("get appender: %s", err)
}
for _, p := range exp {
a.Append(p.t, p.v)
}
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/varbit.go | tsdb/chunkenc/varbit.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"fmt"
"math/bits"
)
// putVarbitInt writes an int64 using varbit encoding with a bit bucketing
// optimized for the dods observed in histogram buckets, plus a few additional
// buckets for large numbers.
//
// For optimal space utilization, each branch wouldn't need to support any
// values already covered by the prior branches, so we could expand the range
// of each branch and do more with fewer bits. That would come at the price of
// more expensive encoding and decoding (cutting out and later adding back the
// center piece we skip). With the distributions of values we see in practice,
// we would reduce the size by around 1%. A more detailed study would be needed
// for precise values, but it appears quite certain that we would end up far
// below 10%, which would hardly convince us to invest the increased
// coding/decoding cost.
func putVarbitInt(b *bstream, val int64) {
switch {
case val == 0: // Precisely 0, needs 1 bit.
b.writeBit(zero)
case bitRange(val, 3): // -3 <= val <= 4, needs 5 bits.
b.writeBits(0b10, 2)
b.writeBits(uint64(val), 3)
case bitRange(val, 6): // -31 <= val <= 32, 9 bits.
b.writeBits(0b110, 3)
b.writeBits(uint64(val), 6)
case bitRange(val, 9): // -255 <= val <= 256, 13 bits.
b.writeBits(0b1110, 4)
b.writeBits(uint64(val), 9)
case bitRange(val, 12): // -2047 <= val <= 2048, 17 bits.
b.writeBits(0b11110, 5)
b.writeBits(uint64(val), 12)
case bitRange(val, 18): // -131071 <= val <= 131072, 3 bytes.
b.writeBits(0b111110, 6)
b.writeBits(uint64(val), 18)
case bitRange(val, 25): // -16777215 <= val <= 16777216, 4 bytes.
b.writeBits(0b1111110, 7)
b.writeBits(uint64(val), 25)
case bitRange(val, 56): // -36028797018963967 <= val <= 36028797018963968, 8 bytes.
b.writeBits(0b11111110, 8)
b.writeBits(uint64(val), 56)
default:
b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
b.writeBits(uint64(val), 64)
}
}
// readVarbitInt reads an int64 encoded with putVarbitInt.
func readVarbitInt(b *bstreamReader) (int64, error) {
var d byte
for range 8 {
d <<= 1
bit, err := b.readBitFast()
if err != nil {
bit, err = b.readBit()
}
if err != nil {
return 0, err
}
if bit == zero {
break
}
d |= 1
}
var val int64
var sz uint8
switch d {
case 0b0:
// val == 0
case 0b10:
sz = 3
case 0b110:
sz = 6
case 0b1110:
sz = 9
case 0b11110:
sz = 12
case 0b111110:
sz = 18
case 0b1111110:
sz = 25
case 0b11111110:
sz = 56
case 0b11111111:
// Do not use the fast path because it's very unlikely to succeed.
bits, err := b.readBits(64)
if err != nil {
return 0, err
}
val = int64(bits)
default:
return 0, fmt.Errorf("invalid bit pattern %b", d)
}
if sz != 0 {
bits, err := b.readBitsFast(sz)
if err != nil {
bits, err = b.readBits(sz)
}
if err != nil {
return 0, err
}
if bits > (1 << (sz - 1)) {
// The encoded value represents a negative number: values above 2^(sz-1)
// wrap around, so subtract 2^sz to recover the negative value. The ranges
// are asymmetric (e.g. -3 to 4 for sz=3), hence > rather than >=.
bits -= (1 << sz)
}
val = int64(bits)
}
return val, nil
}
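// Hypothetical round-trip sketch, not part of the original file: encode a few
// values with putVarbitInt and decode them back with readVarbitInt. The inputs
// exercise the 1-bit zero case, a small bucket, the sign-extension path, and a
// larger bucket.
func varbitIntRoundTripSketch() ([]int64, error) {
	var b bstream
	in := []int64{0, 4, -3, 300}
	for _, v := range in {
		putVarbitInt(&b, v)
	}
	r := newBReader(b.bytes())
	out := make([]int64, 0, len(in))
	for range in {
		v, err := readVarbitInt(&r)
		if err != nil {
			return nil, err
		}
		out = append(out, v) // Decodes back to 0, 4, -3, 300.
	}
	return out, nil
}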
func bitRangeUint(x uint64, nbits int) bool {
return bits.LeadingZeros64(x) >= 64-nbits
}
// putVarbitUint writes a uint64 using varbit encoding. It uses the same bit
// buckets as putVarbitInt.
func putVarbitUint(b *bstream, val uint64) {
switch {
case val == 0: // Precisely 0, needs 1 bit.
b.writeBit(zero)
case bitRangeUint(val, 3): // val <= 7, needs 5 bits.
b.writeBits(0b10, 2)
b.writeBits(val, 3)
case bitRangeUint(val, 6): // val <= 63, 9 bits.
b.writeBits(0b110, 3)
b.writeBits(val, 6)
case bitRangeUint(val, 9): // val <= 511, 13 bits.
b.writeBits(0b1110, 4)
b.writeBits(val, 9)
case bitRangeUint(val, 12): // val <= 4095, 17 bits.
b.writeBits(0b11110, 5)
b.writeBits(val, 12)
case bitRangeUint(val, 18): // val <= 262143, 3 bytes.
b.writeBits(0b111110, 6)
b.writeBits(val, 18)
case bitRangeUint(val, 25): // val <= 33554431, 4 bytes.
b.writeBits(0b1111110, 7)
b.writeBits(val, 25)
case bitRangeUint(val, 56): // val <= 72057594037927935, 8 bytes.
b.writeBits(0b11111110, 8)
b.writeBits(val, 56)
default:
b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
b.writeBits(val, 64)
}
}
// readVarbitUint reads a uint64 encoded with putVarbitUint.
func readVarbitUint(b *bstreamReader) (uint64, error) {
var d byte
for range 8 {
d <<= 1
bit, err := b.readBitFast()
if err != nil {
bit, err = b.readBit()
}
if err != nil {
return 0, err
}
if bit == zero {
break
}
d |= 1
}
var (
bits uint64
sz uint8
err error
)
switch d {
case 0b0:
// val == 0
case 0b10:
sz = 3
case 0b110:
sz = 6
case 0b1110:
sz = 9
case 0b11110:
sz = 12
case 0b111110:
sz = 18
case 0b1111110:
sz = 25
case 0b11111110:
sz = 56
case 0b11111111:
// Do not use the fast path because it's very unlikely to succeed.
bits, err = b.readBits(64)
if err != nil {
return 0, err
}
default:
return 0, fmt.Errorf("invalid bit pattern %b", d)
}
if sz != 0 {
bits, err = b.readBitsFast(sz)
if err != nil {
bits, err = b.readBits(sz)
}
if err != nil {
return 0, err
}
}
return bits, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/float_histogram.go | tsdb/chunkenc/float_histogram.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/value"
)
// FloatHistogramChunk holds encoded sample data for a sparse, high-resolution
// float histogram.
//
// Each sample has multiple "fields", stored in the following way (raw = store
// number directly, delta = store delta to the previous number, dod = store
// delta of the delta to the previous number, xor = what we do for regular
// sample values):
//
// field → ts count zeroCount sum []posbuckets []negbuckets
// sample 1 raw raw raw raw []raw []raw
// sample 2 delta xor xor xor []xor []xor
// sample >2 dod xor xor xor []xor []xor
type FloatHistogramChunk struct {
b bstream
}
// NewFloatHistogramChunk returns a new chunk with float histogram encoding.
func NewFloatHistogramChunk() *FloatHistogramChunk {
b := make([]byte, histogramHeaderSize, chunkAllocationSize)
return &FloatHistogramChunk{b: bstream{stream: b, count: 0}}
}
func (c *FloatHistogramChunk) Reset(stream []byte) {
c.b.Reset(stream)
}
// xorValue holds all the necessary information to encode
// and decode XOR encoded float64 values.
type xorValue struct {
value float64
leading uint8
trailing uint8
}
// Encoding returns the encoding type.
func (*FloatHistogramChunk) Encoding() Encoding {
return EncFloatHistogram
}
// Bytes returns the underlying byte slice of the chunk.
func (c *FloatHistogramChunk) Bytes() []byte {
return c.b.bytes()
}
// NumSamples returns the number of samples in the chunk.
func (c *FloatHistogramChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *FloatHistogramChunk) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(c.Bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
// Compact implements the Chunk interface.
func (c *FloatHistogramChunk) Compact() {
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
buf := make([]byte, l)
copy(buf, c.b.stream)
c.b.stream = buf
}
}
// Appender implements the Chunk interface.
func (c *FloatHistogramChunk) Appender() (Appender, error) {
if len(c.b.stream) == histogramHeaderSize { // Avoid allocating an Iterator when chunk is empty.
return &FloatHistogramAppender{b: &c.b, t: math.MinInt64, sum: xorValue{leading: 0xff}, cnt: xorValue{leading: 0xff}, zCnt: xorValue{leading: 0xff}}, nil
}
it := c.iterator(nil)
// To get an appender, we must know the state it would have if we had
// appended all existing data from scratch. We iterate through to the end
// and populate the appender via the iterator's state.
for it.Next() == ValFloatHistogram {
}
if err := it.Err(); err != nil {
return nil, err
}
pBuckets := make([]xorValue, len(it.pBuckets))
for i := 0; i < len(it.pBuckets); i++ {
pBuckets[i] = xorValue{
value: it.pBuckets[i],
leading: it.pBucketsLeading[i],
trailing: it.pBucketsTrailing[i],
}
}
nBuckets := make([]xorValue, len(it.nBuckets))
for i := 0; i < len(it.nBuckets); i++ {
nBuckets[i] = xorValue{
value: it.nBuckets[i],
leading: it.nBucketsLeading[i],
trailing: it.nBucketsTrailing[i],
}
}
a := &FloatHistogramAppender{
b: &c.b,
schema: it.schema,
zThreshold: it.zThreshold,
pSpans: it.pSpans,
nSpans: it.nSpans,
customValues: it.customValues,
t: it.t,
tDelta: it.tDelta,
cnt: it.cnt,
zCnt: it.zCnt,
pBuckets: pBuckets,
nBuckets: nBuckets,
sum: it.sum,
}
return a, nil
}
func (c *FloatHistogramChunk) iterator(it Iterator) *floatHistogramIterator {
// This comment is copied from XORChunk.iterator:
// Should iterators guarantee to act on a copy of the data so it doesn't lock append?
// When using striped locks to guard access to chunks, probably yes.
// Could only copy data if the chunk is not completed yet.
if histogramIter, ok := it.(*floatHistogramIterator); ok {
histogramIter.Reset(c.b.bytes())
return histogramIter
}
return newFloatHistogramIterator(c.b.bytes())
}
func newFloatHistogramIterator(b []byte) *floatHistogramIterator {
it := &floatHistogramIterator{
br: newBReader(b[histogramHeaderSize:]),
numTotal: binary.BigEndian.Uint16(b),
t: math.MinInt64,
}
it.counterResetHeader = CounterResetHeader(b[histogramFlagPos] & CounterResetHeaderMask)
return it
}
// Iterator implements the Chunk interface.
func (c *FloatHistogramChunk) Iterator(it Iterator) Iterator {
return c.iterator(it)
}
// FloatHistogramAppender is an Appender implementation for float histograms.
type FloatHistogramAppender struct {
b *bstream
// Layout:
schema int32
zThreshold float64
pSpans, nSpans []histogram.Span
customValues []float64
t, tDelta int64
sum, cnt, zCnt xorValue
pBuckets, nBuckets []xorValue
}
func (a *FloatHistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
func (a *FloatHistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
a.b.bytes()[histogramFlagPos] = (a.b.bytes()[histogramFlagPos] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
}
func (a *FloatHistogramAppender) NumSamples() int {
return int(binary.BigEndian.Uint16(a.b.bytes()))
}
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (*FloatHistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
// appendable returns whether the chunk can be appended to, and if so whether
// 1. Any recoding needs to happen to the chunk using the provided forward
// inserts (in case of any new buckets, positive or negative range,
// respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared, unless the bucket count was 0 (unused).
// Empty buckets can happen if the chunk was recoded and we're merging a
// non-recoded histogram. In this case backward inserts will be provided.
// - There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.CounterResetHint == histogram.CounterReset {
// Always honor the explicit counter reset hint.
counterReset = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.Count < a.cnt.value {
// There has been a counter reset.
counterReset = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
counterReset = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.ZeroCount < a.zCnt.value {
// There has been a counter reset since ZeroThreshold didn't change.
counterReset = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
var ok bool
positiveInserts, backwardPositiveInserts, ok = expandFloatSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
if !ok {
counterReset = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
negativeInserts, backwardNegativeInserts, ok = expandFloatSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
if !ok {
counterReset = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
// expandFloatSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', otherwise the function will return false.
// The function also returns the inserts needed to expand 'b' to cover all the
// buckets that are missing in 'b' but present with a 0 counter value in 'a'.
// The function also checks for counter resets between 'a' and 'b'.
//
// Example:
//
// Let's say the old buckets look like this:
//
// span syntax: [offset, length]
// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1]
// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15]
// raw values 6 3 3 2 4 5 1
// deltas 6 -3 0 -1 2 1 -4
//
// But now we introduce a new bucket layout. (Carefully chosen example where we
// have a span appended, one unchanged[*], one prepended, and two merged - in
// that order.)
//
// [*] Unchanged in terms of which bucket indices they represent, but to achieve
// that, their offset needs to change if "disrupted" by spans changing ahead of
// them.
//
// \/ this one is "unchanged"
// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ]
// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15]
// raw values 6 3 0 3 0 0 2 4 5 0 1
// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1
// delta mods: / \ / \ / \
//
// Note for histograms with delta-encoded buckets: Whenever any new buckets are
// introduced, the subsequent "old" bucket needs to readjust its delta to the
// new base of 0. Thus, for the caller who wants to transform the set of
// original deltas to a new set of deltas to match a new span layout that adds
// buckets, we simply need to generate a list of inserts.
//
// Note: Within expandFloatSpansAndBuckets we don't have to worry about the changes to the
// spans themselves, thanks to the iterators we get to work with the more useful
// bucket indices (which of course directly correspond to the buckets we have to
// adjust).
func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuckets []float64) (forward, backward []Insert, ok bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
// be yielded when we finish a streak of new buckets.
var aInter Insert
var bInter Insert
aIdx, aOK := ai.Next()
bIdx, bOK := bi.Next()
// Bucket count. Initialize the absolute count and index into the
// positive/negative counts or deltas array. The bucket count is
// used to detect counter reset as well as unused buckets in a.
var (
aCount float64
bCount float64
aCountIdx int
bCountIdx int
)
if aOK {
aCount = aBuckets[aCountIdx].value
}
if bOK {
bCount = bBuckets[bCountIdx]
}
// addInsert updates the current Insert with a new insert at the given
// bucket index (otherIdx).
addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert {
if insert.num == 0 {
// First insert.
insert.bucketIdx = otherIdx
} else if insert.bucketIdx+insert.num != otherIdx {
// Insert is not continuous from previous insert.
inserts = append(inserts, *insert)
insert.num = 0
insert.bucketIdx = otherIdx
}
insert.num++
return inserts
}
advanceA := func() {
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
aIdx, aOK = ai.Next()
aInter.pos++
aCountIdx++
if aOK {
aCount = aBuckets[aCountIdx].value
}
}
advanceB := func() {
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
bIdx, bOK = bi.Next()
bInter.pos++
bCountIdx++
if bOK {
bCount = bBuckets[bCountIdx]
}
}
loop:
for {
switch {
case aOK && bOK:
switch {
case aIdx == bIdx: // Both have an identical bucket index.
// Bucket count. Check bucket for reset from a to b.
if aCount > bCount {
return nil, nil, false
}
advanceA()
advanceB()
continue
case aIdx < bIdx: // b misses a bucket index that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInserts = addInsert(bInserts, &bInter, aIdx)
advanceA()
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
aInserts = addInsert(aInserts, &aInter, bIdx)
advanceB()
}
case aOK && !bOK: // b misses a value that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInserts = addInsert(bInserts, &bInter, aIdx)
advanceA()
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
aInserts = addInsert(aInserts, &aInter, bIdx)
advanceB()
default: // Both iterators ran out. We're done.
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
}
break loop
}
}
return aInserts, bInserts, true
}
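// Illustrative sketch (added for exposition, not part of the upstream file):
// the two Insert lists returned above are consumed in opposite directions.
// The forward inserts stretch the chunk's stored buckets to the layout of the
// incoming histogram, while the backward inserts stretch the incoming
// histogram to cover empty buckets that exist only in the chunk. A caller
// (with hypothetical variable names) would do something like:
//
//	fwd, bwd, ok := expandFloatSpansAndBuckets(oldSpans, newSpans, oldBuckets, newBuckets)
//	if !ok {
//		// Counter reset: a used bucket disappeared or a count dropped,
//		// so the chunk cannot be extended and a new one must be cut.
//	}
//	// fwd drives a recode of the chunk, bwd drives a recode of the
//	// incoming histogram, so that both end up with one common layout.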
// appendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// This method must only be used for gauge histograms.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
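// Exposition note (not upstream code): unlike the counter path, the gauge
// path merges the two span layouts symmetrically via expandSpansBothWays, so
// either side may need recoding. A hedged outline of how a caller acts on
// the result (variable names are illustrative):
//
//	pFwd, nFwd, pBwd, nBwd, pSpans, nSpans, ok := a.appendableGauge(h)
//	// !ok               -> cut a new gauge chunk
//	// pBwd/nBwd non-nil -> recode h onto the merged spans first
//	// pFwd/nFwd non-nil -> recode the chunk onto the merged spans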
// appendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
// Appendable() and act accordingly!
func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.FloatHistogram) {
var tDelta int64
num := binary.BigEndian.Uint16(a.b.bytes())
if value.IsStaleNaN(h.Sum) {
// Empty out the other fields so that no buckets are written and, in
// case this is the first histogram in the chunk, an empty layout is encoded.
h = &histogram.FloatHistogram{Sum: h.Sum}
}
if num == 0 {
// The first append gets the privilege to dictate the layout
// but it's also responsible for encoding it into the chunk!
writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
a.schema = h.Schema
a.zThreshold = h.ZeroThreshold
if len(h.PositiveSpans) > 0 {
a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
copy(a.pSpans, h.PositiveSpans)
} else {
a.pSpans = nil
}
if len(h.NegativeSpans) > 0 {
a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
copy(a.nSpans, h.NegativeSpans)
} else {
a.nSpans = nil
}
if len(h.CustomValues) > 0 {
a.customValues = make([]float64, len(h.CustomValues))
copy(a.customValues, h.CustomValues)
} else {
a.customValues = nil
}
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
if numPBuckets > 0 {
a.pBuckets = make([]xorValue, numPBuckets)
for i := range numPBuckets {
a.pBuckets[i] = xorValue{
value: h.PositiveBuckets[i],
leading: 0xff,
}
}
} else {
a.pBuckets = nil
}
if numNBuckets > 0 {
a.nBuckets = make([]xorValue, numNBuckets)
for i := range numNBuckets {
a.nBuckets[i] = xorValue{
value: h.NegativeBuckets[i],
leading: 0xff,
}
}
} else {
a.nBuckets = nil
}
// Now store the actual data.
putVarbitInt(a.b, t)
a.b.writeBits(math.Float64bits(h.Count), 64)
a.b.writeBits(math.Float64bits(h.ZeroCount), 64)
a.b.writeBits(math.Float64bits(h.Sum), 64)
a.cnt.value = h.Count
a.zCnt.value = h.ZeroCount
a.sum.value = h.Sum
for _, b := range h.PositiveBuckets {
a.b.writeBits(math.Float64bits(b), 64)
}
for _, b := range h.NegativeBuckets {
a.b.writeBits(math.Float64bits(b), 64)
}
} else {
// The case of the 2nd sample with a single delta is implicitly handled correctly by the double-delta code,
// so we don't need separate single-delta logic for the 2nd sample.
tDelta = t - a.t
tDod := tDelta - a.tDelta
putVarbitInt(a.b, tDod)
a.writeXorValue(&a.cnt, h.Count)
a.writeXorValue(&a.zCnt, h.ZeroCount)
a.writeXorValue(&a.sum, h.Sum)
for i, b := range h.PositiveBuckets {
a.writeXorValue(&a.pBuckets[i], b)
}
for i, b := range h.NegativeBuckets {
a.writeXorValue(&a.nBuckets[i], b)
}
}
binary.BigEndian.PutUint16(a.b.bytes(), num+1)
a.t = t
a.tDelta = tDelta
}
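// Exposition note (an inference from the code above, not an upstream
// comment): the first two bytes of the chunk payload hold the big-endian
// sample count, so the number of samples can be read without decoding any
// sample data:
//
//	num := binary.BigEndian.Uint16(a.b.bytes())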
func (a *FloatHistogramAppender) writeXorValue(old *xorValue, v float64) {
xorWrite(a.b, v, old.value, &old.leading, &old.trailing)
old.value = v
}
// recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *FloatHistogramAppender) recode(
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
// TODO(beorn7): This currently just decodes everything and then encodes
// it again with the new span layout. This can probably be done in-place
// by editing the chunk. But let's first see how expensive it is in the
// big picture. Also, in-place editing might create concurrency issues.
byts := a.b.bytes()
it := newFloatHistogramIterator(byts)
hc := NewFloatHistogramChunk()
app, err := hc.Appender()
if err != nil {
panic(err) // This should never happen for an empty float histogram chunk.
}
happ := app.(*FloatHistogramAppender)
numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
for it.Next() == ValFloatHistogram {
tOld, hOld := it.AtFloatHistogram(nil)
// We have to newly allocate slices for the modified buckets
// here because they are kept by the appender until the next
// append.
// TODO(beorn7): We might be able to optimize this.
var positiveBuckets, negativeBuckets []float64
if numPositiveBuckets > 0 {
positiveBuckets = make([]float64, numPositiveBuckets)
}
if numNegativeBuckets > 0 {
negativeBuckets = make([]float64, numNegativeBuckets)
}
// Save the modified histogram to the new chunk.
hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
if len(positiveInserts) > 0 {
hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, false)
}
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, false)
}
happ.appendFloatHistogram(tOld, hOld)
}
happ.setCounterResetHeader(CounterResetHeader(byts[histogramFlagPos] & CounterResetHeaderMask))
return hc, app
}
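// Illustrative usage sketch (not in the upstream source): after a recode the
// receiver and its chunk are stale, and all further appends must go through
// the returned pair, as AppendFloatHistogram does further down:
//
//	chk, app := a.recode(pFwd, nFwd, newPosSpans, newNegSpans)
//	app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
//	// chk now replaces the old chunk in the caller's bookkeeping.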
// recodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
func (*FloatHistogramAppender) recodeHistogram(
fh *histogram.FloatHistogram,
pBackwardInter, nBackwardInter []Insert,
) {
if len(pBackwardInter) > 0 {
numPositiveBuckets := countSpans(fh.PositiveSpans)
fh.PositiveBuckets = insert(fh.PositiveBuckets, make([]float64, numPositiveBuckets), pBackwardInter, false)
}
if len(nBackwardInter) > 0 {
numNegativeBuckets := countSpans(fh.NegativeSpans)
fh.NegativeBuckets = insert(fh.NegativeBuckets, make([]float64, numNegativeBuckets), nBackwardInter, false)
}
}
func (*FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float histogram chunk")
}
func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (Chunk, bool, Appender, error) {
if a.NumSamples() == 0 {
a.appendFloatHistogram(t, h)
if h.CounterResetHint == histogram.GaugeType {
a.setCounterResetHeader(GaugeType)
return nil, false, a, nil
}
switch {
case h.CounterResetHint == histogram.CounterReset:
// Always honor the explicit counter reset hint.
a.setCounterResetHeader(CounterReset)
case prev != nil:
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless it was already set.
_, _, _, _, _, counterReset := prev.appendable(h)
if counterReset {
a.setCounterResetHeader(CounterReset)
} else {
a.setCounterResetHeader(NotCounterReset)
}
}
return nil, false, a, nil
}
// Adding counter-like histogram.
if h.CounterResetHint != histogram.GaugeType {
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
return nil, false, a, errors.New("float histogram counter reset")
}
return nil, false, a, errors.New("float histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty float histogram chunk.
}
happ := app.(*FloatHistogramAppender)
if counterReset {
happ.setCounterResetHeader(CounterReset)
}
happ.appendFloatHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
// However, we need to make a copy in case the input shares its spans with an iterator.
h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
copy(h.PositiveSpans, a.pSpans)
h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
copy(h.NegativeSpans, a.nSpans)
} else {
// Spans need pre-adjusting to accommodate the new buckets.
h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
}
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
return chk, true, app, nil
}
a.appendFloatHistogram(t, h)
return nil, false, a, nil
}
// Adding gauge histogram.
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
return nil, false, a, errors.New("float gauge histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty float histogram chunk.
}
happ := app.(*FloatHistogramAppender)
happ.setCounterResetHeader(GaugeType)
happ.appendFloatHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram layout change with %d positive and %d negative backwards inserts", len(pBackwardInserts), len(nBackwardInserts))
}
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
return chk, true, app, nil
}
a.appendFloatHistogram(t, h)
return nil, false, a, nil
}
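// exampleAppendFlow is a minimal, self-contained sketch (added for
// exposition; the function name and flow are illustrative, not upstream API)
// of how a caller drives AppendFloatHistogram: a non-nil returned chunk means
// the sample landed in a recoded or freshly cut chunk, and all further
// appends must go through the returned appender.
func exampleAppendFlow(c Chunk, app Appender, t int64, h *histogram.FloatHistogram) (Chunk, Appender, error) {
	newChunk, recoded, newApp, err := app.AppendFloatHistogram(nil, t, h, false)
	if err != nil {
		// Only reachable with appendOnly=true; kept for completeness.
		return c, app, err
	}
	switch {
	case newChunk == nil:
		// The sample was appended to c in place.
	case recoded:
		// c was rewritten with a wider bucket layout; newChunk already
		// contains all previous samples plus h.
		c = newChunk
	default:
		// A counter reset or layout break cut a fresh chunk holding only
		// h; a real caller would persist c and continue on newChunk.
		c = newChunk
	}
	return c, newApp, nil
}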
type floatHistogramIterator struct {
br bstreamReader
numTotal uint16
numRead uint16
counterResetHeader CounterResetHeader
// Layout:
schema int32
zThreshold float64
pSpans, nSpans []histogram.Span
customValues []float64
// For the fields that are tracked as deltas and ultimately dod's.
t int64
tDelta int64
// All Gorilla xor encoded.
sum, cnt, zCnt xorValue
// Buckets are not of type xorValue to avoid creating
// new slices for every AtFloatHistogram call.
pBuckets, nBuckets []float64
pBucketsLeading, nBucketsLeading []uint8
pBucketsTrailing, nBucketsTrailing []uint8
err error
// Track calls to retrieve methods. Once they have been called, we
// cannot recycle the bucket slices anymore because we have returned
// them in the histogram.
atFloatHistogramCalled bool
}
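// Exposition note (not upstream): the recycling contract encoded by
// atFloatHistogramCalled is that the iterator may overwrite its bucket
// slices on Next() only until a caller has observed them, e.g. via
//
//	_, fh := it.AtFloatHistogram(nil) // fh aliases it.pBuckets/it.nBuckets
//
// after which fresh slices must be allocated instead of mutating ones that
// were handed out.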
func (it *floatHistogramIterator) Seek(t int64) ValueType {
if it.err != nil {
return ValNone
}
for t > it.t || it.numRead == 0 {
if it.Next() == ValNone {
return ValNone
}
}
return ValFloatHistogram
}
func (*floatHistogramIterator) At() (int64, float64) {
panic("cannot call floatHistogramIterator.At")
}
func (*floatHistogramIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
panic("cannot call floatHistogramIterator.AtHistogram")
}
func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
if value.IsStaleNaN(it.sum.value) {
return it.t, &histogram.FloatHistogram{Sum: it.sum.value}
}
if fh == nil {
it.atFloatHistogramCalled = true
fh = &histogram.FloatHistogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt.value,
ZeroCount: it.zCnt.value,
Sum: it.sum.value,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets,
CustomValues: it.customValues,
}
if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
fh = fh.Copy()
if err := fh.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
// With the checks above, this can only happen
// with invalid data in a chunk. As this is a
// rare edge case of a rare edge case, we'd
// rather not create all the plumbing to handle
// this error gracefully.
panic(err)
}
}
return it.t, fh
}
fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/bstream_test.go | tsdb/chunkenc/bstream_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestBstream_Reset(t *testing.T) {
bs := bstream{
stream: []byte("test"),
count: 10,
}
bs.Reset([]byte("was reset"))
require.Equal(t, bstream{
stream: []byte("was reset"),
count: 0,
}, bs)
}
func TestBstreamReader(t *testing.T) {
// Write to the bit stream.
w := bstream{}
for _, bit := range []bit{true, false} {
w.writeBit(bit)
}
for nbits := 1; nbits <= 64; nbits++ {
w.writeBits(uint64(nbits), nbits)
}
for v := 1; v < 10000; v += 123 {
w.writeBits(uint64(v), 29)
}
// Read back.
r := newBReader(w.bytes())
for _, bit := range []bit{true, false} {
v, err := r.readBitFast()
if err != nil {
v, err = r.readBit()
}
require.NoError(t, err)
require.Equal(t, bit, v)
}
for nbits := uint8(1); nbits <= 64; nbits++ {
v, err := r.readBitsFast(nbits)
if err != nil {
v, err = r.readBits(nbits)
}
require.NoError(t, err)
require.Equal(t, uint64(nbits), v, "nbits=%d", nbits)
}
for v := 1; v < 10000; v += 123 {
actual, err := r.readBitsFast(29)
if err != nil {
actual, err = r.readBits(29)
}
require.NoError(t, err)
require.Equal(t, uint64(v), actual, "v=%d", v)
}
}
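// exampleBitRoundTrip is an illustrative sketch (added for exposition, not
// part of the upstream test file): the minimal write/read cycle exercised
// above, reduced to a single fixed-width value.
func exampleBitRoundTrip(v uint64, nbits uint8) (uint64, error) {
	w := bstream{}
	w.writeBits(v, int(nbits)) // Write the low nbits bits of v.
	r := newBReader(w.bytes())
	return r.readBits(nbits) // Read them back.
}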
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/histogram_test.go | tsdb/chunkenc/histogram_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
type result struct {
t int64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
func TestFirstHistogramExplicitCounterReset(t *testing.T) {
tests := map[string]struct {
hint histogram.CounterResetHint
expHeader CounterResetHeader
expHint histogram.CounterResetHint
}{
"CounterReset": {
hint: histogram.CounterReset,
expHeader: CounterReset,
expHint: histogram.UnknownCounterReset,
},
"NotCounterReset": {
hint: histogram.NotCounterReset,
expHeader: UnknownCounterReset,
expHint: histogram.UnknownCounterReset,
},
"UnknownCounterReset": {
hint: histogram.UnknownCounterReset,
expHeader: UnknownCounterReset,
expHint: histogram.UnknownCounterReset,
},
"Gauge": {
hint: histogram.GaugeType,
expHeader: GaugeType,
expHint: histogram.GaugeType,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
h := &histogram.Histogram{
CounterResetHint: test.hint,
}
chk := NewHistogramChunk()
app, err := chk.Appender()
require.NoError(t, err)
newChk, recoded, newApp, err := app.AppendHistogram(nil, 0, h, false)
require.NoError(t, err)
require.Nil(t, newChk)
require.False(t, recoded)
require.Equal(t, app, newApp)
require.Equal(t, test.expHeader, chk.GetCounterResetHeader())
assertFirstIntHistogramSampleHint(t, chk, test.expHint)
})
}
}
func TestHistogramChunkSameBuckets(t *testing.T) {
c := NewHistogramChunk()
var exp []result
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h := &histogram.Histogram{
Count: 15,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-100,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0}, // counts: 1, 2, 1, 1 (total 5)
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 1},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8)
}
chk, _, app, err := app.AppendHistogram(nil, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
exp = append(exp, result{t: ts, h: h, fh: h.ToFloat(nil)})
require.Equal(t, 1, c.NumSamples())
// Add an updated histogram.
ts += 16
h = h.Copy()
h.Count = 32
h.ZeroCount++
h.Sum = 24.4
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
chk, _, _, err = app.AppendHistogram(nil, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
hExp := h.Copy()
hExp.CounterResetHint = histogram.NotCounterReset
exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat(nil)})
require.Equal(t, 2, c.NumSamples())
// Add update with new appender.
app, err = c.Appender()
require.NoError(t, err)
ts += 14
h = h.Copy()
h.Count = 54
h.ZeroCount += 2
h.Sum = 24.4
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
chk, _, _, err = app.AppendHistogram(nil, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
hExp = h.Copy()
hExp.CounterResetHint = histogram.NotCounterReset
exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat(nil)})
require.Equal(t, 3, c.NumSamples())
// 1. Expand iterator in simple case.
it := c.Iterator(nil)
require.NoError(t, it.Err())
var act []result
for it.Next() == ValHistogram {
ts, h := it.AtHistogram(nil)
fts, fh := it.AtFloatHistogram(nil)
require.Equal(t, ts, fts)
act = append(act, result{t: ts, h: h, fh: fh})
}
require.NoError(t, it.Err())
require.Equal(t, exp, act)
// 2. Expand second iterator while reusing first one.
it2 := c.Iterator(it)
var act2 []result
for it2.Next() == ValHistogram {
ts, h := it2.AtHistogram(nil)
fts, fh := it2.AtFloatHistogram(nil)
require.Equal(t, ts, fts)
act2 = append(act2, result{t: ts, h: h, fh: fh})
}
require.NoError(t, it2.Err())
require.Equal(t, exp, act2)
// 3. Now recycle an iterator that was never used to access anything.
itX := c.Iterator(nil)
for itX.Next() == ValHistogram {
// Just iterate through without accessing anything.
}
it3 := c.iterator(itX)
var act3 []result
for it3.Next() == ValHistogram {
ts, h := it3.AtHistogram(nil)
fts, fh := it3.AtFloatHistogram(nil)
require.Equal(t, ts, fts)
act3 = append(act3, result{t: ts, h: h, fh: fh})
}
require.NoError(t, it3.Err())
require.Equal(t, exp, act3)
// 4. Test iterator Seek.
mid := len(exp) / 2
it4 := c.Iterator(nil)
var act4 []result
require.Equal(t, ValHistogram, it4.Seek(exp[mid].t))
// Repeated Seeks to the same timestamp below should not matter.
require.Equal(t, ValHistogram, it4.Seek(exp[mid].t))
require.Equal(t, ValHistogram, it4.Seek(exp[mid].t))
ts, h = it4.AtHistogram(nil)
fts, fh := it4.AtFloatHistogram(nil)
require.Equal(t, ts, fts)
act4 = append(act4, result{t: ts, h: h, fh: fh})
for it4.Next() == ValHistogram {
ts, h := it4.AtHistogram(nil)
fts, fh := it4.AtFloatHistogram(nil)
require.Equal(t, ts, fts)
act4 = append(act4, result{t: ts, h: h, fh: fh})
}
require.NoError(t, it4.Err())
require.Equal(t, exp[mid:], act4)
require.Equal(t, ValNone, it4.Seek(exp[len(exp)-1].t+1))
}
// Mimics the scenario described for expandIntSpansAndBuckets.
func TestHistogramChunkBucketChanges(t *testing.T) {
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts1 := int64(1234567890)
h1 := &histogram.Histogram{
Count: 27,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
NegativeSpans: []histogram.Span{{Offset: 1, Length: 1}},
NegativeBuckets: []int64{1},
}
chk, _, app, err := app.AppendHistogram(nil, ts1, h1, false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
// Add a new histogram that has expanded buckets.
ts2 := ts1 + 16
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.NegativeSpans = []histogram.Span{{Offset: 0, Length: 2}}
h2.Count = 35
h2.ZeroCount++
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
// Existing histogram should get values converted from the above to:
// 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
// This is how span changes will be handled.
hApp, _ := app.(*HistogramAppender)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.NotEmpty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.Equal(t, NotCounterReset, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
chk, _, _, err = app.AppendHistogram(nil, ts2, h2, false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 2, c.NumSamples())
// Because the 2nd histogram has expanded buckets, we should expect all
// histograms (in particular the first) to come back using the new spans
// metadata as well as the expanded buckets.
h1.PositiveSpans = h2.PositiveSpans
h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1}
h1.NegativeSpans = h2.NegativeSpans
h1.NegativeBuckets = []int64{0, 1}
hExp := h2.Copy()
hExp.CounterResetHint = histogram.NotCounterReset
exp := []result{
{t: ts1, h: h1, fh: h1.ToFloat(nil)},
{t: ts2, h: hExp, fh: hExp.ToFloat(nil)},
}
it := c.Iterator(nil)
var act []result
for it.Next() == ValHistogram {
ts, h := it.AtHistogram(nil)
fts, fh := it.AtFloatHistogram(nil)
require.Equal(t, ts, fts)
act = append(act, result{t: ts, h: h, fh: fh})
}
require.NoError(t, it.Err())
require.Equal(t, exp, act)
}
func TestHistogramChunkAppendable(t *testing.T) {
eh := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
}
cbh := &histogram.Histogram{
Count: 24,
Sum: 18.4,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
require.Equal(t, UnknownCounterReset, c.(*HistogramChunk).GetCounterResetHeader())
return c, app.(*HistogramAppender), ts, h
}
{ // Schema change.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Schema++
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset)
}
{ // Zero threshold change.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has more buckets.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 9
h2.ZeroCount++
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.Equal(t, NotCounterReset, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram that has a bucket missing.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 5, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 21
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.Equal(t, CounterReset, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12)
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9.
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 1},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1} // counts: 7, 2, 3, 3, 4 (total 19)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.Equal(t, NotCounterReset, cr)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3, 0, 3, 4 (total 19)
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has new buckets AND buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12)
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9.
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 2},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1, 1} // counts: 7, 2, 3, 3, 4, 5 (total 24)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.Equal(t, NotCounterReset, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3, 0, 3, 4, 5 (total 24)
require.Equal(t, []histogram.Span{
{Offset: 0, Length: 2}, // Added empty bucket.
{Offset: 2, Length: 1}, // Existing - offset adjusted.
{Offset: 3, Length: 2}, // Added empty bucket.
{Offset: 3, Length: 1}, // Existing - offset adjusted.
{Offset: 1, Length: 2}, // Existing.
}, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.Equal(t, CounterReset, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Sum = 29
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.Equal(t, CounterReset, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{
// New histogram that has a counter reset while new buckets were
// added before the first bucket, with a reset on the first bucket.
// (This catches the edge case where the new buckets must be forwarded
// ahead until the first old bucket at the start.)
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 26
// Existing histogram should get values converted from the above to:
// 0, 0, 6, 3, 3, 2, 4, 5, 1
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.Equal(t, CounterReset, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has an explicit counter reset.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.CounterResetHint = histogram.CounterReset
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
_, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() // Identity is appendable.
nextChunk := NewHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, app, newApp)
assertSampleCount(t, nextChunk, 1, ValHistogram)
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
assertFirstIntHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset)
}
{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
_, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Count-- // Make this not appendable due to counter reset.
nextChunk := NewHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, app, newApp)
assertSampleCount(t, nextChunk, 1, ValHistogram)
require.Equal(t, CounterReset, nextChunk.GetCounterResetHeader())
assertFirstIntHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset)
}
{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
_, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 9
h2.ZeroCount++
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
nextChunk := NewHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, app, newApp)
assertSampleCount(t, nextChunk, 1, ValHistogram)
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
assertFirstIntHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset)
}
{
// Start a new chunk with a histogram that has an empty bucket.
// Add a histogram that has the same bucket missing.
// This should be appendable and can happen if we are merging from chunks
// where the first sample came from a recoded chunk that added the
// empty bucket.
h1 := eh.Copy()
// Add an empty bucket 10 offsets below the first bucket.
h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1)
h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1}
h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length}
for i, v := range eh.PositiveSpans[1:] {
h1.PositiveSpans[i+2] = v
}
h1.PositiveBuckets = make([]int64, len(eh.PositiveBuckets)+1)
h1.PositiveBuckets[0] = 0
for i, v := range eh.PositiveBuckets {
h1.PositiveBuckets[i+1] = v
}
c, hApp, ts, _ := setup(h1)
h2 := eh.Copy()
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.Equal(t, NotCounterReset, cr)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, increase in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, decrease in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // Custom buckets, change only in custom bounds.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // Custom buckets, with more buckets.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 6
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.Equal(t, NotCounterReset, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram with a different schema.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Schema = 2
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.Equal(t, UnknownCounterReset, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset)
}
{ // New histogram with a different zero threshold.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.ZeroThreshold = 1e-120
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.Equal(t, UnknownCounterReset, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset)
}
}
func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader, expectHint histogram.CounterResetHint) {
oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.False(t, recoded)
require.NotEqual(t, oldChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*HistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 1, ValHistogram)
assertFirstIntHistogramSampleHint(t, newChunk, expectHint)
}
func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := currChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
require.Greater(t, len(currChunk.Bytes()), len(prevChunkBytes)) // Check that the current chunk grew compared to before the append.
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, expectHeader, currChunk.(*HistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.Equal(t, hApp, newAppender)
assertSampleCount(t, currChunk, 2, ValHistogram)
}
func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
require.Equal(t, prevChunkBytes, prevChunk.Bytes()) // Sanity check that previous chunk is untouched. This may change in the future if we implement in-place recoding.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.True(t, recoded)
require.NotEqual(t, prevChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*HistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 2, ValHistogram)
}
func assertSampleCount(t *testing.T, c Chunk, exp int64, vtype ValueType) {
count := int64(0)
it := c.Iterator(nil)
require.NoError(t, it.Err())
for it.Next() == vtype {
count++
}
require.NoError(t, it.Err())
require.Equal(t, exp, count)
}
func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
tests := map[string]struct {
h1 *histogram.Histogram
h2 *histogram.Histogram
}{
"empty span in old and new histogram": {
h1: &histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 1, -1, 1, 0, 0, 0},
},
h2: &histogram.Histogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
},
"empty span in old histogram": {
h1: &histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 0}, // This span will disappear.
{Offset: 2, Length: 4},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 1, -1, 1, 0, 0, 0},
},
h2: &histogram.Histogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 3, Length: 4},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
},
"empty span in new histogram": {
h1: &histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 3, Length: 3},
},
PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/xor_test.go | tsdb/chunkenc/xor_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"testing"
"github.com/stretchr/testify/require"
)
func BenchmarkXorRead(b *testing.B) {
c := NewXORChunk()
app, err := c.Appender()
require.NoError(b, err)
for i := int64(0); i < 120*1000; i += 1000 {
app.Append(i, float64(i)+float64(i)/10+float64(i)/100+float64(i)/1000)
}
b.ReportAllocs()
var it Iterator
for b.Loop() {
var ts int64
var v float64
it = c.Iterator(it)
for it.Next() != ValNone {
ts, v = it.At()
}
_, _ = ts, v
}
}
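// exampleXORAppendIterate is an illustrative sketch (added for exposition,
// not part of the upstream test file): the append/iterate cycle the benchmark
// above measures, reduced to a single sample.
func exampleXORAppendIterate() (int64, float64, error) {
	c := NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		return 0, 0, err
	}
	app.Append(42, 3.14) // Timestamp, value.
	it := c.Iterator(nil)
	it.Next() // One sample, so a single Next() suffices.
	ts, v := it.At()
	return ts, v, it.Err()
}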
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/varbit_test.go | tsdb/chunkenc/varbit_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"math"
"testing"
"github.com/stretchr/testify/require"
)
func TestVarbitInt(t *testing.T) {
numbers := []int64{
math.MinInt64,
-36028797018963968, -36028797018963967,
-16777216, -16777215,
-131072, -131071,
-2048, -2047,
-256, -255,
-32, -31,
-4, -3,
-1, 0, 1,
4, 5,
32, 33,
256, 257,
2048, 2049,
131072, 131073,
16777216, 16777217,
36028797018963968, 36028797018963969,
math.MaxInt64,
}
bs := bstream{}
for _, n := range numbers {
putVarbitInt(&bs, n)
}
bsr := newBReader(bs.bytes())
for _, want := range numbers {
got, err := readVarbitInt(&bsr)
require.NoError(t, err)
require.Equal(t, want, got)
}
}
func TestVarbitUint(t *testing.T) {
numbers := []uint64{
0, 1,
7, 8,
63, 64,
511, 512,
4095, 4096,
262143, 262144,
33554431, 33554432,
72057594037927935, 72057594037927936,
math.MaxUint64,
}
bs := bstream{}
for _, n := range numbers {
putVarbitUint(&bs, n)
}
bsr := newBReader(bs.bytes())
for _, want := range numbers {
got, err := readVarbitUint(&bsr)
require.NoError(t, err)
require.Equal(t, want, got)
}
}
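// exampleVarbitRoundTrip is an illustrative helper (added for exposition,
// not part of the upstream test file) showing the minimal encode/decode
// cycle the tests above exercise: a value is written into a bstream with a
// variable number of bits and read back via a bstreamReader.
func exampleVarbitRoundTrip(v int64) (int64, error) {
	bs := bstream{}
	putVarbitInt(&bs, v) // Uses fewer bits for values close to zero.
	bsr := newBReader(bs.bytes())
	return readVarbitInt(&bsr)
}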
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/float_histogram_test.go | tsdb/chunkenc/float_histogram_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
type floatResult struct {
t int64
h *histogram.FloatHistogram
}
func TestFirstFloatHistogramExplicitCounterReset(t *testing.T) {
tests := map[string]struct {
hint histogram.CounterResetHint
expHeader CounterResetHeader
expHint histogram.CounterResetHint
}{
"CounterReset": {
hint: histogram.CounterReset,
expHeader: CounterReset,
expHint: histogram.UnknownCounterReset,
},
"NotCounterReset": {
hint: histogram.NotCounterReset,
expHeader: UnknownCounterReset,
expHint: histogram.UnknownCounterReset,
},
"UnknownCounterReset": {
hint: histogram.UnknownCounterReset,
expHeader: UnknownCounterReset,
expHint: histogram.UnknownCounterReset,
},
"Gauge": {
hint: histogram.GaugeType,
expHeader: GaugeType,
expHint: histogram.GaugeType,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
h := &histogram.FloatHistogram{
CounterResetHint: test.hint,
}
chk := NewFloatHistogramChunk()
app, err := chk.Appender()
require.NoError(t, err)
newChk, recoded, newApp, err := app.AppendFloatHistogram(nil, 0, h, false)
require.NoError(t, err)
require.Nil(t, newChk)
require.False(t, recoded)
require.Equal(t, app, newApp)
require.Equal(t, test.expHeader, chk.GetCounterResetHeader())
assertFirstFloatHistogramSampleHint(t, chk, test.expHint)
})
}
}
func TestFloatHistogramChunkSameBuckets(t *testing.T) {
c := NewFloatHistogramChunk()
var exp []floatResult
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h := &histogram.Histogram{
Count: 15,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-100,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0}, // counts: 1, 2, 1, 1 (total 5)
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 1},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8)
}
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
exp = append(exp, floatResult{t: ts, h: h.ToFloat(nil)})
require.Equal(t, 1, c.NumSamples())
// Add an updated histogram.
ts += 16
h = h.Copy()
h.Count = 32
h.ZeroCount++
h.Sum = 24.4
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
expH := h.ToFloat(nil)
expH.CounterResetHint = histogram.NotCounterReset
exp = append(exp, floatResult{t: ts, h: expH})
require.Equal(t, 2, c.NumSamples())
// Add update with new appender.
app, err = c.Appender()
require.NoError(t, err)
ts += 14
h = h.Copy()
h.Count = 54
h.ZeroCount += 2
h.Sum = 24.4
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
expH = h.ToFloat(nil)
expH.CounterResetHint = histogram.NotCounterReset
exp = append(exp, floatResult{t: ts, h: expH})
require.Equal(t, 3, c.NumSamples())
// 1. Expand iterator in simple case.
it := c.Iterator(nil)
require.NoError(t, it.Err())
var act []floatResult
for it.Next() == ValFloatHistogram {
fts, fh := it.AtFloatHistogram(nil)
act = append(act, floatResult{t: fts, h: fh})
}
require.NoError(t, it.Err())
require.Equal(t, exp, act)
// 2. Expand second iterator while reusing first one.
it2 := c.Iterator(it)
var act2 []floatResult
for it2.Next() == ValFloatHistogram {
fts, fh := it2.AtFloatHistogram(nil)
act2 = append(act2, floatResult{t: fts, h: fh})
}
require.NoError(t, it2.Err())
require.Equal(t, exp, act2)
// 3. Now recycle an iterator that was never used to access anything.
itX := c.Iterator(nil)
for itX.Next() == ValFloatHistogram {
// Just iterate through without accessing anything.
}
it3 := c.iterator(itX)
var act3 []floatResult
for it3.Next() == ValFloatHistogram {
fts, fh := it3.AtFloatHistogram(nil)
act3 = append(act3, floatResult{t: fts, h: fh})
}
require.NoError(t, it3.Err())
require.Equal(t, exp, act3)
// 4. Test iterator Seek.
mid := len(exp) / 2
it4 := c.Iterator(nil)
var act4 []floatResult
require.Equal(t, ValFloatHistogram, it4.Seek(exp[mid].t))
// The following repeated seeks to the same timestamp must be no-ops.
require.Equal(t, ValFloatHistogram, it4.Seek(exp[mid].t))
require.Equal(t, ValFloatHistogram, it4.Seek(exp[mid].t))
fts, fh := it4.AtFloatHistogram(nil)
act4 = append(act4, floatResult{t: fts, h: fh})
for it4.Next() == ValFloatHistogram {
fts, fh := it4.AtFloatHistogram(nil)
act4 = append(act4, floatResult{t: fts, h: fh})
}
require.NoError(t, it4.Err())
require.Equal(t, exp[mid:], act4)
require.Equal(t, ValNone, it4.Seek(exp[len(exp)-1].t+1))
}
// Mimics the scenario described for expandFloatSpansAndBuckets.
func TestFloatHistogramChunkBucketChanges(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts1 := int64(1234567890)
h1 := &histogram.Histogram{
Count: 27,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
NegativeSpans: []histogram.Span{{Offset: 1, Length: 1}},
NegativeBuckets: []int64{1},
}
chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
// Add a new histogram that has expanded buckets.
ts2 := ts1 + 16
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.NegativeSpans = []histogram.Span{{Offset: 0, Length: 2}}
h2.Count = 35
h2.ZeroCount++
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
// Existing histogram should get values converted from the above to:
// 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
// This is how span changes will be handled.
hApp, _ := app.(*FloatHistogramAppender)
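// appendable returns the forward inserts for the positive and negative
// buckets, any backward inserts for the incoming histogram, whether the
// histogram can be appended to the open chunk (ok), and whether a counter
// reset was detected (cr).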
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2.ToFloat(nil))
require.NotEmpty(t, posInterjections)
require.NotEmpty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 2, c.NumSamples())
// Because the 2nd histogram has expanded buckets, we should expect all
// histograms (in particular the first) to come back using the new spans
// metadata as well as the expanded buckets.
h1.PositiveSpans = h2.PositiveSpans
h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1}
h1.NegativeSpans = h2.NegativeSpans
h1.NegativeBuckets = []int64{0, 1}
expH2 := h2.ToFloat(nil)
expH2.CounterResetHint = histogram.NotCounterReset
exp := []floatResult{
{t: ts1, h: h1.ToFloat(nil)},
{t: ts2, h: expH2},
}
it := c.Iterator(nil)
var act []floatResult
for it.Next() == ValFloatHistogram {
fts, fh := it.AtFloatHistogram(nil)
act = append(act, floatResult{t: fts, h: fh})
}
require.NoError(t, it.Err())
require.Equal(t, exp, act)
}
func TestFloatHistogramChunkAppendable(t *testing.T) {
eh := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
cbh := &histogram.FloatHistogram{
Count: 24,
Sum: 18.4,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
require.Equal(t, UnknownCounterReset, c.(*FloatHistogramChunk).GetCounterResetHeader())
return c, app.(*FloatHistogramAppender), ts, h
}
{ // Schema change.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Schema++
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset)
}
{ // Zero threshold change.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has more buckets.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 9
h2.ZeroCount++
h2.Sum = 30
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram that has a bucket missing.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 5, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 21
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1}
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 2},
{Offset: 5, Length: 1},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []float64{7, 4, 3, 5, 2}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2}, h2.PositiveBuckets)
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has new buckets AND buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1}
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 2},
{Offset: 5, Length: 2},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []float64{7, 4, 3, 5, 2, 3}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2, 3}, h2.PositiveBuckets)
require.Equal(t, []histogram.Span{
{Offset: 0, Length: 2}, // Added empty bucket.
{Offset: 2, Length: 1}, // Existing - offset adjusted.
{Offset: 3, Length: 2}, // Existing.
{Offset: 3, Length: 1}, // Added empty bucket.
{Offset: 1, Length: 2}, // Existing + the extra bucket.
}, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Sum = 29
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{
c, hApp, ts, h1 := setup(eh)
// New histogram that has a counter reset while new buckets were
// added before the first bucket, with a reset on the first bucket
// (to catch the edge case where the new buckets must be forwarded
// ahead until the first old bucket at the start).
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 26
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // New histogram that has an explicit counter reset.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.CounterResetHint = histogram.CounterReset
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
_, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() // Identity is appendable.
nextChunk := NewFloatHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, app, newApp)
assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
assertFirstFloatHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset)
}
{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
_, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Count-- // Make this not appendable due to counter reset.
nextChunk := NewFloatHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, app, newApp)
assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
require.Equal(t, CounterReset, nextChunk.GetCounterResetHeader())
assertFirstFloatHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset)
}
{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
_, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 9
h2.ZeroCount++
h2.Sum = 30
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
nextChunk := NewFloatHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, app, newApp)
assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
assertFirstFloatHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset)
}
{
// Start a new chunk with a histogram that has an empty bucket.
// Add a histogram that has the same bucket missing.
// This should be appendable and can happen if we are merging from chunks
// where the first sample came from a recoded chunk that added the
// empty bucket.
h1 := eh.Copy()
// Add an empty bucket 10 offsets before the first bucket.
h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1)
h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1}
h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length}
for i, v := range eh.PositiveSpans[1:] {
h1.PositiveSpans[i+2] = v
}
h1.PositiveBuckets = make([]float64, len(eh.PositiveBuckets)+1)
h1.PositiveBuckets[0] = 0
for i, v := range eh.PositiveBuckets {
h1.PositiveBuckets[i+1] = v
}
c, hApp, ts, _ := setup(h1)
h2 := eh.Copy()
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, increase in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, decrease in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // Custom buckets, change only in custom bounds.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset)
}
{ // Custom buckets, with more buckets.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 6
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
}
func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader, expectHint histogram.CounterResetHint) {
oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.False(t, recoded)
require.NotEqual(t, oldChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*FloatHistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 1, ValFloatHistogram)
assertFirstFloatHistogramSampleHint(t, newChunk, expectHint)
}
func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.Greater(t, len(oldChunk.Bytes()), len(oldChunkBytes)) // Check that current chunk is bigger than previously.
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, expectHeader, oldChunk.(*FloatHistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.Equal(t, hApp, newAppender)
assertSampleCount(t, oldChunk, 2, ValFloatHistogram)
}
func assertRecodedFloatHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.Equal(t, prevChunkBytes, prevChunk.Bytes()) // Sanity check that previous chunk is untouched. This may change in the future if we implement in-place recoding.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.True(t, recoded)
require.NotEqual(t, prevChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*FloatHistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 2, ValFloatHistogram)
}
func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
tests := map[string]struct {
h1 *histogram.FloatHistogram
h2 *histogram.FloatHistogram
}{
"empty span in old and new histogram": {
h1: &histogram.FloatHistogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
},
h2: &histogram.FloatHistogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
},
},
"empty span in old histogram": {
h1: &histogram.FloatHistogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 0}, // This span will disappear.
{Offset: 2, Length: 4},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
},
h2: &histogram.FloatHistogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 3, Length: 4},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
},
},
"empty span in new histogram": {
h1: &histogram.FloatHistogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 3, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
},
h2: &histogram.FloatHistogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 1, Length: 0}, // This span is new.
{Offset: 2, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
},
},
"two empty spans mixing offsets": {
h1: &histogram.FloatHistogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 1, Length: 0},
{Offset: 3, Length: 0},
{Offset: 4, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
},
h2: &histogram.FloatHistogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 3, Length: 0},
{Offset: 1, Length: 0},
{Offset: 4, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
},
},
"empty span in old and new custom buckets histogram": {
h1: &histogram.FloatHistogram{
Schema: histogram.CustomBucketsSchema,
Count: 7,
Sum: 1234.5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
},
h2: &histogram.FloatHistogram{
Schema: histogram.CustomBucketsSchema,
Count: 10,
Sum: 2345.6,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
},
},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
_, _, _, err = app.AppendFloatHistogram(nil, 1, tc.h1, true)
require.NoError(t, err)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2)
require.Empty(t, pI)
require.Empty(t, nI)
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/histogram_meta_test.go | tsdb/chunkenc/histogram_meta_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmapped
// read-only byte slices.
package chunkenc
import (
"math"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
)
// Example of a span layout and resulting bucket indices (_idx_ is used in this
// histogram, others are shown just for context):
//
// spans : [offset: 0, length: 2] [offset 1, length 1]
// bucket idx : _0_ _1_ 2 [3] 4 ...
func TestBucketIterator(t *testing.T) {
type test struct {
spans []histogram.Span
idxs []int
}
tests := []test{
{
spans: []histogram.Span{
{
Offset: 0,
Length: 1,
},
},
idxs: []int{0},
},
{
spans: []histogram.Span{
{
Offset: 0,
Length: 2,
},
{
Offset: 1,
Length: 1,
},
},
idxs: []int{0, 1, 3},
},
{
spans: []histogram.Span{
{
Offset: 100,
Length: 4,
},
{
Offset: 8,
Length: 7,
},
{
Offset: 0,
Length: 1,
},
},
idxs: []int{100, 101, 102, 103, 112, 113, 114, 115, 116, 117, 118, 119},
},
// The two sets below are the ones described in expandFloatSpansAndBuckets's comments.
{
spans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
idxs: []int{0, 1, 4, 8, 9, 13, 15},
},
{
spans: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
},
idxs: []int{0, 1, 2, 4, 6, 7, 8, 9, 13, 14, 15},
},
}
for _, test := range tests {
b := newBucketIterator(test.spans)
var got []int
v, ok := b.Next()
for ok {
got = append(got, v)
v, ok = b.Next()
}
require.Equal(t, test.idxs, got)
}
}
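// A minimal companion test (an illustrative addition, not from the original
// suite) pinning down the doc-comment example above: the spans
// [offset: 0, length: 2] [offset 1, length 1] yield bucket indices 0, 1, 3.
func TestBucketIteratorDocExample(t *testing.T) {
	b := newBucketIterator([]histogram.Span{
		{Offset: 0, Length: 2},
		{Offset: 1, Length: 1},
	})
	var got []int
	for idx, ok := b.Next(); ok; idx, ok = b.Next() {
		got = append(got, idx)
	}
	require.Equal(t, []int{0, 1, 3}, got)
}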
func TestExpandSpansBothWaysAndInsert(t *testing.T) {
scenarios := []struct {
description string
spansA, spansB []histogram.Span
fInserts, bInserts []Insert
bucketsIn, bucketsOut []int64
mergedSpans []histogram.Span
}{
{
description: "single prepend at the beginning",
spansA: []histogram.Span{
{Offset: -10, Length: 3},
},
spansB: []histogram.Span{
{Offset: -11, Length: 4},
},
fInserts: []Insert{
{
pos: 0,
num: 1,
},
},
bucketsIn: []int64{6, -3, 0},
bucketsOut: []int64{0, 6, -3, 0},
mergedSpans: []histogram.Span{
{Offset: -11, Length: 4},
},
},
{
description: "single append at the end",
spansA: []histogram.Span{
{Offset: -10, Length: 3},
},
spansB: []histogram.Span{
{Offset: -10, Length: 4},
},
fInserts: []Insert{
{
pos: 3,
num: 1,
},
},
bucketsIn: []int64{6, -3, 0},
bucketsOut: []int64{6, -3, 0, -3},
mergedSpans: []histogram.Span{
{Offset: -10, Length: 4},
},
},
{
description: "double prepend at the beginning",
spansA: []histogram.Span{
{Offset: -10, Length: 3},
},
spansB: []histogram.Span{
{Offset: -12, Length: 5},
},
fInserts: []Insert{
{
pos: 0,
num: 2,
},
},
bucketsIn: []int64{6, -3, 0},
bucketsOut: []int64{0, 0, 6, -3, 0},
mergedSpans: []histogram.Span{
{Offset: -12, Length: 5},
},
},
{
description: "double append at the end",
spansA: []histogram.Span{
{Offset: -10, Length: 3},
},
spansB: []histogram.Span{
{Offset: -10, Length: 5},
},
fInserts: []Insert{
{
pos: 3,
num: 2,
},
},
bucketsIn: []int64{6, -3, 0},
bucketsOut: []int64{6, -3, 0, -3, 0},
mergedSpans: []histogram.Span{
{Offset: -10, Length: 5},
},
},
{
description: "double prepend at the beginning and double append at the end",
spansA: []histogram.Span{
{Offset: -10, Length: 3},
},
spansB: []histogram.Span{
{Offset: -12, Length: 7},
},
fInserts: []Insert{
{
pos: 0,
num: 2,
},
{
pos: 3,
num: 2,
},
},
bucketsIn: []int64{6, -3, 0},
bucketsOut: []int64{0, 0, 6, -3, 0, -3, 0},
mergedSpans: []histogram.Span{
{Offset: -12, Length: 7},
},
},
{
description: "single removal of bucket at the start",
spansA: []histogram.Span{
{Offset: -10, Length: 4},
},
spansB: []histogram.Span{
{Offset: -9, Length: 3},
},
bInserts: []Insert{
{pos: 0, num: 1},
},
bucketsIn: []int64{1, 2, -1, 2},
bucketsOut: []int64{1, 2, -1, 2},
mergedSpans: []histogram.Span{
{Offset: -10, Length: 4},
},
},
{
description: "single removal of bucket in the middle",
spansA: []histogram.Span{
{Offset: -10, Length: 4},
},
spansB: []histogram.Span{
{Offset: -10, Length: 2},
{Offset: 1, Length: 1},
},
bInserts: []Insert{
{pos: 2, num: 1},
},
bucketsIn: []int64{1, 2, -1, 2},
bucketsOut: []int64{1, 2, -1, 2},
mergedSpans: []histogram.Span{
{Offset: -10, Length: 4},
},
},
{
description: "single removal of bucket at the end",
spansA: []histogram.Span{
{Offset: -10, Length: 4},
},
spansB: []histogram.Span{
{Offset: -10, Length: 3},
},
bInserts: []Insert{
{pos: 3, num: 1},
},
mergedSpans: []histogram.Span{
{Offset: -10, Length: 4},
},
bucketsIn: []int64{1, 2, -1, 2},
bucketsOut: []int64{1, 2, -1, 2},
},
{
description: "as described in doc comment",
spansA: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
spansB: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
},
fInserts: []Insert{
{
pos: 2,
num: 1,
},
{
pos: 3,
num: 2,
},
{
pos: 6,
num: 1,
},
},
bucketsIn: []int64{6, -3, 0, -1, 2, 1, -4},
bucketsOut: []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1},
mergedSpans: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
},
},
{
description: "both forward and backward inserts, complex case",
spansA: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
spansB: []histogram.Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 1},
{Offset: 1, Length: 2},
{Offset: 1, Length: 1},
{Offset: 4, Length: 1},
},
fInserts: []Insert{
{
pos: 2,
num: 1,
},
{
pos: 3,
num: 2,
},
{
pos: 6,
num: 1,
},
},
bInserts: []Insert{
{
pos: 0,
num: 1,
},
{
pos: 5,
num: 1,
},
{
pos: 6,
num: 1,
},
{
pos: 7,
num: 1,
},
},
bucketsIn: []int64{1, 2, -1, 2, 0, 3, 1},
bucketsOut: []int64{1, 2, -3, 2, -2, 0, 4, 0, 3, -7, 8},
mergedSpans: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
},
},
{
description: "inserts with gaps",
spansA: []histogram.Span{
{Offset: -19, Length: 2},
{Offset: 1, Length: 2},
},
spansB: []histogram.Span{
{Offset: -19, Length: 1},
{Offset: 4, Length: 1},
{Offset: 3, Length: 1},
},
fInserts: []Insert{
{pos: 4, num: 2},
},
bInserts: []Insert{
{pos: 1, num: 3},
},
bucketsIn: []int64{1, 2, -1, 1},
bucketsOut: []int64{1, 2, -1, 1, -3, 0},
mergedSpans: []histogram.Span{
{Offset: -19, Length: 2},
{Offset: 1, Length: 3},
{Offset: 3, Length: 1},
},
},
}
for _, s := range scenarios {
t.Run(s.description, func(t *testing.T) {
fInserts, bInserts, m := expandSpansBothWays(s.spansA, s.spansB)
require.Equal(t, s.fInserts, fInserts)
require.Equal(t, s.bInserts, bInserts)
require.Equal(t, s.mergedSpans, m)
gotBuckets := make([]int64, len(s.bucketsOut))
insert(s.bucketsIn, gotBuckets, fInserts, true)
require.Equal(t, s.bucketsOut, gotBuckets)
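// Float chunks store absolute bucket values rather than deltas, so convert
// the delta-encoded fixtures to their cumulative form before exercising the
// float variant of insert().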
floatBucketsIn := make([]float64, len(s.bucketsIn))
last := s.bucketsIn[0]
floatBucketsIn[0] = float64(last)
for i := 1; i < len(floatBucketsIn); i++ {
last += s.bucketsIn[i]
floatBucketsIn[i] = float64(last)
}
floatBucketsOut := make([]float64, len(s.bucketsOut))
last = s.bucketsOut[0]
floatBucketsOut[0] = float64(last)
for i := 1; i < len(floatBucketsOut); i++ {
last += s.bucketsOut[i]
floatBucketsOut[i] = float64(last)
}
gotFloatBuckets := make([]float64, len(floatBucketsOut))
insert(floatBucketsIn, gotFloatBuckets, fInserts, false)
require.Equal(t, floatBucketsOut, gotFloatBuckets)
})
}
}
func TestWriteReadHistogramChunkLayout(t *testing.T) {
layouts := []struct {
schema int32
zeroThreshold float64
positiveSpans, negativeSpans []histogram.Span
customValues []float64
}{
{
schema: 3,
zeroThreshold: 0,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
},
{
schema: -2,
zeroThreshold: 2.938735877055719e-39, // Default value in client_golang.
positiveSpans: nil,
negativeSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}},
},
{
schema: 6,
zeroThreshold: 1024, // The largest power of two we can encode in one byte.
positiveSpans: nil,
negativeSpans: nil,
},
{
schema: 6,
zeroThreshold: 1025,
positiveSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}, {Offset: 0, Length: 0}}, // Weird span.
negativeSpans: []histogram.Span{{Offset: -345, Length: 4545}, {Offset: 53645665, Length: 345}, {Offset: 945995, Length: 85848}},
},
{
schema: 6,
zeroThreshold: 2048,
positiveSpans: nil,
negativeSpans: nil,
},
{
schema: 0,
zeroThreshold: math.Ldexp(0.5, -242), // The smallest power of two we can encode in one byte.
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}},
negativeSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}},
},
{
schema: 0,
zeroThreshold: math.Ldexp(0.5, -243),
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}},
negativeSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}},
},
{
schema: 4,
zeroThreshold: 42, // Not a power of two.
positiveSpans: nil,
negativeSpans: nil,
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{-5, -2.5, 0, 0.1, 0.25, 0.5, 1, 2, 5, 10, 25, 50, 100, 255, 500, 1000, 50000, 1e7},
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0},
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192},
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{1.001, 1.023, 2.01, 4.007, 4.095, 8.001, 8.19, 16.24},
},
}
bs := bstream{}
for _, l := range layouts {
writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans, l.customValues)
}
bsr := newBReader(bs.bytes())
for _, want := range layouts {
gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, gotCustomBounds, err := readHistogramChunkLayout(&bsr)
require.NoError(t, err)
require.Equal(t, want.schema, gotSchema)
require.Equal(t, want.zeroThreshold, gotZeroThreshold)
require.Equal(t, want.positiveSpans, gotPositiveSpans)
require.Equal(t, want.negativeSpans, gotNegativeSpans)
require.Equal(t, want.customValues, gotCustomBounds)
}
}
func TestSpansFromBidirectionalCompareSpans(t *testing.T) {
cases := []struct {
s1, s2, exp []histogram.Span
}{
{ // All empty.
s1: []histogram.Span{},
s2: []histogram.Span{},
},
{ // Same spans.
s1: []histogram.Span{},
s2: []histogram.Span{},
},
{
// Has the cases of
// 1. |----| (partial overlap)
// |----|
//
// 2. |-----| (no gap but no overlap as well)
// |---|
//
// 3. |----| (complete overlap)
// |----|
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
s2: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
{Offset: 2, Length: 3},
{Offset: 3, Length: 3},
},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 7},
{Offset: 3, Length: 3},
},
},
{
// s1 is superset of s2.
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 5},
{Offset: 3, Length: 3},
},
s2: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 5, Length: 3},
{Offset: 4, Length: 3},
},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 5},
{Offset: 3, Length: 3},
},
},
{
// No overlaps but one span is side by side.
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
s2: []histogram.Span{
{Offset: 3, Length: 3},
{Offset: 4, Length: 2},
},
exp: []histogram.Span{
{Offset: 0, Length: 9},
{Offset: 1, Length: 2},
{Offset: 2, Length: 3},
},
},
{
// No buckets in one of them.
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
s2: []histogram.Span{},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
},
{ // Zero length spans.
s1: []histogram.Span{
{Offset: -5, Length: 0},
{Offset: 2, Length: 0},
{Offset: 3, Length: 3},
{Offset: 1, Length: 0},
{Offset: 2, Length: 3},
{Offset: 2, Length: 0},
{Offset: 2, Length: 0},
{Offset: 1, Length: 3},
{Offset: 4, Length: 0},
{Offset: 5, Length: 0},
},
s2: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
{Offset: 1, Length: 0},
{Offset: 1, Length: 3},
{Offset: 3, Length: 3},
},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 7},
{Offset: 3, Length: 3},
},
},
}
for _, c := range cases {
s1c := make([]histogram.Span, len(c.s1))
s2c := make([]histogram.Span, len(c.s2))
copy(s1c, c.s1)
copy(s2c, c.s2)
_, _, act := expandSpansBothWays(c.s1, c.s2)
require.Equal(t, c.exp, act)
// Check that s1 and s2 are not modified.
require.Equal(t, s1c, c.s1)
require.Equal(t, s2c, c.s2)
_, _, act = expandSpansBothWays(c.s2, c.s1)
require.Equal(t, c.exp, act)
}
}
func TestExpandIntOrFloatSpansAndBuckets(t *testing.T) {
testCases := map[string]struct {
spansA []histogram.Span
bucketsA []int64
spansB []histogram.Span
bucketsB []int64
expectReset bool
expectForwardInserts []Insert
expectBackwardInserts []Insert
expectMergedSpans []histogram.Span
expectBucketsA []int64
expectBucketsB []int64
}{
"empty": {
spansA: []histogram.Span{},
bucketsA: []int64{},
spansB: []histogram.Span{},
bucketsB: []int64{},
expectReset: false,
expectForwardInserts: nil,
expectBackwardInserts: nil,
expectMergedSpans: []histogram.Span{},
expectBucketsA: []int64{},
expectBucketsB: []int64{},
},
"single bucket reset to none": {
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{1},
spansB: []histogram.Span{},
bucketsB: []int64{},
expectReset: true,
},
"single bucket reset to lower": {
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{2},
spansB: []histogram.Span{{Offset: 1, Length: 1}},
bucketsB: []int64{1},
expectReset: true,
},
"single bucket increase": {
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{1},
spansB: []histogram.Span{{Offset: 1, Length: 1}},
bucketsB: []int64{2},
expectReset: false,
expectForwardInserts: nil,
expectBackwardInserts: nil,
expectMergedSpans: []histogram.Span{{Offset: 1, Length: 1}},
expectBucketsA: []int64{1},
expectBucketsB: []int64{2},
},
"distinct new buckets and increase": {
// A: ___1_____
// B: 22_22___2
// B': 22_22___2
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{1},
spansB: []histogram.Span{{Offset: -2, Length: 2}, {Offset: 1, Length: 2}, {Offset: 3, Length: 1}},
bucketsB: []int64{2, 0, 0, 0, 0},
expectReset: false,
expectForwardInserts: []Insert{{pos: 0, num: 2, bucketIdx: -2}, {pos: 1, num: 1, bucketIdx: 2}, {pos: 1, num: 1, bucketIdx: 6}},
expectBackwardInserts: nil,
expectMergedSpans: []histogram.Span{{Offset: -2, Length: 2}, {Offset: 1, Length: 2}, {Offset: 3, Length: 1}},
expectBucketsA: []int64{0, 0, 1, -1, 0},
expectBucketsB: []int64{2, 0, 0, 0, 0},
},
"distinct new buckets but reset": {
// A: ___2_____
// B: 11_11___1
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{2},
spansB: []histogram.Span{{Offset: -2, Length: 2}, {Offset: 1, Length: 2}, {Offset: 3, Length: 1}},
bucketsB: []int64{1, 0, 0, 0, 0},
expectReset: true,
},
"distinct new buckets but missing": {
// A: ___2_____
// B: 11__1___1
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{2},
spansB: []histogram.Span{{Offset: -2, Length: 2}, {Offset: 2, Length: 1}, {Offset: 3, Length: 1}},
bucketsB: []int64{1, 0, 0, 0},
expectReset: true,
},
"distinct new buckets and missing an empty bucket": {
// A: _0__
// B: ___1
// B': _0_1
spansA: []histogram.Span{{Offset: 1, Length: 1}},
bucketsA: []int64{0},
spansB: []histogram.Span{{Offset: 3, Length: 1}},
bucketsB: []int64{1},
expectReset: false,
expectForwardInserts: []Insert{{pos: 1, num: 1, bucketIdx: 3}},
expectBackwardInserts: []Insert{{pos: 0, num: 1, bucketIdx: 1}},
expectMergedSpans: []histogram.Span{{Offset: 1, Length: 1}, {Offset: 1, Length: 1}},
expectBucketsA: []int64{0, 0},
expectBucketsB: []int64{0, 1},
},
"distinct new buckets and missing multiple empty buckets": {
// Idx: 01234567890123
// A: _000_00__0__00
// B: ________1_____
// B': _000_00_10__00
spansA: []histogram.Span{{Offset: 1, Length: 3}, {Offset: 1, Length: 2}, {Offset: 2, Length: 1}, {Offset: 2, Length: 2}},
bucketsA: []int64{0, 0, 0, 0, 0, 0, 0, 0},
spansB: []histogram.Span{{Offset: 8, Length: 1}},
bucketsB: []int64{1},
expectReset: false,
expectForwardInserts: []Insert{{pos: 5, num: 1, bucketIdx: 8}},
expectBackwardInserts: []Insert{{pos: 0, num: 3, bucketIdx: 1}, {pos: 0, num: 2, bucketIdx: 5}, {pos: 1, num: 1, bucketIdx: 9}, {pos: 1, num: 2, bucketIdx: 12}},
expectMergedSpans: []histogram.Span{{Offset: 1, Length: 3}, {Offset: 1, Length: 2}, {Offset: 1, Length: 2}, {Offset: 2, Length: 2}},
expectBucketsA: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0},
expectBucketsB: []int64{0, 0, 0, 0, 0, 1, -1, 0, 0},
},
"overlap new buckets and missing multiple empty buckets": {
// Idx: 01234567890123
// A: _000_00_10__00
// B: ________2_____
// B': _000_00_20__00
spansA: []histogram.Span{{Offset: 1, Length: 3}, {Offset: 1, Length: 2}, {Offset: 1, Length: 2}, {Offset: 2, Length: 2}},
bucketsA: []int64{0, 0, 0, 0, 0, 1, -1, 0, 0},
spansB: []histogram.Span{{Offset: 8, Length: 1}},
bucketsB: []int64{2},
expectReset: false,
expectForwardInserts: nil,
expectBackwardInserts: []Insert{{pos: 0, num: 3, bucketIdx: 1}, {pos: 0, num: 2, bucketIdx: 5}, {pos: 1, num: 1, bucketIdx: 9}, {pos: 1, num: 2, bucketIdx: 12}},
expectMergedSpans: []histogram.Span{{Offset: 1, Length: 3}, {Offset: 1, Length: 2}, {Offset: 1, Length: 2}, {Offset: 2, Length: 2}},
expectBucketsA: []int64{0, 0, 0, 0, 0, 1, -1, 0, 0},
expectBucketsB: []int64{0, 0, 0, 0, 0, 2, -2, 0, 0},
},
"overlap new buckets and missing multiple empty buckets with 0 length/offset spans": {
// Idx: 01234567890123
// A: _000_00_10__00
// B: ________2_____
// B': _000_00_20__00
spansA: []histogram.Span{{Offset: 1, Length: 3}, {Offset: 1, Length: 2}, {Offset: 1, Length: 2}, {Offset: 1, Length: 0}, {Offset: 1, Length: 2}},
bucketsA: []int64{0, 0, 0, 0, 0, 1, -1, 0, 0},
spansB: []histogram.Span{{Offset: 1, Length: 0}, {Offset: 7, Length: 1}},
bucketsB: []int64{2},
expectReset: false,
expectForwardInserts: nil,
expectBackwardInserts: []Insert{{pos: 0, num: 3, bucketIdx: 1}, {pos: 0, num: 2, bucketIdx: 5}, {pos: 1, num: 1, bucketIdx: 9}, {pos: 1, num: 2, bucketIdx: 12}},
expectMergedSpans: []histogram.Span{{Offset: 1, Length: 3}, {Offset: 1, Length: 2}, {Offset: 1, Length: 2}, {Offset: 2, Length: 2}},
expectBucketsA: []int64{0, 0, 0, 0, 0, 1, -1, 0, 0},
expectBucketsB: []int64{0, 0, 0, 0, 0, 2, -2, 0, 0},
},
"new empty buckets between filled buckets": {
// A: 11212332____1__1
// B: 122323321__11__1
// A': 112123320__01__1
// B': 122323321__11__1
spansA: []histogram.Span{{Offset: -51, Length: 8}, {Offset: 11, Length: 1}, {Offset: 14, Length: 1}},
bucketsA: []int64{1, 0, 1, -1, 1, 1, 0, -1, -1, 0},
spansB: []histogram.Span{{Offset: -51, Length: 9}, {Offset: 9, Length: 2}, {Offset: 14, Length: 1}},
bucketsB: []int64{1, 1, 0, 1, -1, 1, 0, -1, -1, 0, 0, 0},
expectReset: false,
expectForwardInserts: []Insert{{pos: 8, num: 1, bucketIdx: -43}, {pos: 8, num: 1, bucketIdx: -33}},
expectMergedSpans: []histogram.Span{{Offset: -51, Length: 9}, {Offset: 9, Length: 2}, {Offset: 14, Length: 1}},
expectBucketsA: []int64{1, 0, 1, -1, 1, 1, 0, -1, -2, 0, 1, 0},
expectBucketsB: []int64{1, 1, 0, 1, -1, 1, 0, -1, -1, 0, 0, 0},
},
"real example 1": {
// I- 6543210987654321 (digits are the last digit of the negative indices -16..-1)
// A: 0__2_______0__
// B: _0130_____00_0
// A': 00020_____00_0
// B': 00130_____00_0
spansA: []histogram.Span{{Offset: -16, Length: 1}, {Offset: 2, Length: 1}, {Offset: 7, Length: 1}},
bucketsA: []int64{0, 2, -2},
spansB: []histogram.Span{{Offset: -15, Length: 4}, {Offset: 5, Length: 2}, {Offset: 1, Length: 1}},
bucketsB: []int64{0, 1, 2, -3, 0, 0, 0},
expectReset: false,
expectForwardInserts: []Insert{{pos: 1, num: 2, bucketIdx: -15}, {pos: 2, num: 1, bucketIdx: -12}, {pos: 2, num: 1, bucketIdx: -6}, {pos: 3, num: 1, bucketIdx: -3}},
expectBackwardInserts: []Insert{{pos: 0, num: 1, bucketIdx: -16}},
expectMergedSpans: []histogram.Span{{Offset: -16, Length: 5}, {Offset: 5, Length: 2}, {Offset: 1, Length: 1}},
expectBucketsA: []int64{0, 0, 0, 2, -2, 0, 0, 0},
expectBucketsB: []int64{0, 0, 1, 2, -3, 0, 0, 0},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// Sanity check.
require.Len(t, tc.bucketsA, countSpans(tc.spansA))
require.Len(t, tc.bucketsB, countSpans(tc.spansB))
require.Len(t, tc.expectBucketsA, countSpans(tc.expectMergedSpans))
require.Len(t, tc.expectBucketsB, countSpans(tc.expectMergedSpans))
t.Run("integers", func(t *testing.T) {
fInserts, bInserts, ok := expandIntSpansAndBuckets(tc.spansA, tc.spansB, tc.bucketsA, tc.bucketsB)
if tc.expectReset {
require.False(t, ok)
return
}
require.Equal(t, tc.expectForwardInserts, fInserts, "forward inserts")
require.Equal(t, tc.expectBackwardInserts, bInserts, "backward inserts")
gotBspans := adjustForInserts(tc.spansB, bInserts)
require.Equal(t, tc.expectMergedSpans, gotBspans)
gotAbuckets := make([]int64, len(tc.expectBucketsA))
insert(tc.bucketsA, gotAbuckets, fInserts, true)
require.Equal(t, tc.expectBucketsA, gotAbuckets)
gotBbuckets := make([]int64, len(tc.expectBucketsB))
insert(tc.bucketsB, gotBbuckets, bInserts, true)
require.Equal(t, tc.expectBucketsB, gotBbuckets)
})
t.Run("floats", func(t *testing.T) {
aXorValues := make([]xorValue, len(tc.bucketsA))
absolute := float64(0)
for i, v := range tc.bucketsA {
absolute += float64(v)
aXorValues[i].value = absolute
}
makeFloatBuckets := func(in []int64) []float64 {
out := make([]float64, len(in))
absolute = float64(0)
for i, v := range in {
absolute += float64(v)
out[i] = absolute
}
return out
}
bFloatBuckets := makeFloatBuckets(tc.bucketsB)
fInserts, bInserts, ok := expandFloatSpansAndBuckets(tc.spansA, tc.spansB, aXorValues, bFloatBuckets)
if tc.expectReset {
require.False(t, ok)
return
}
require.Equal(t, tc.expectForwardInserts, fInserts, "forward inserts")
require.Equal(t, tc.expectBackwardInserts, bInserts, "backward inserts")
gotBspans := adjustForInserts(tc.spansB, bInserts)
require.Equal(t, tc.expectMergedSpans, gotBspans)
gotAbuckets := make([]float64, len(tc.expectBucketsA))
insert(makeFloatBuckets(tc.bucketsA), gotAbuckets, fInserts, false)
require.Equal(t, makeFloatBuckets(tc.expectBucketsA), gotAbuckets)
gotBbuckets := make([]float64, len(tc.expectBucketsB))
insert(makeFloatBuckets(tc.bucketsB), gotBbuckets, bInserts, false)
require.Equal(t, makeFloatBuckets(tc.expectBucketsB), gotBbuckets)
})
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/histogram_meta.go | tsdb/chunkenc/histogram_meta.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"math"
"github.com/prometheus/prometheus/model/histogram"
)
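// writeHistogramChunkLayout writes the layout header of a histogram chunk to
// the bstream: the zero threshold, the schema, the positive and negative
// spans, and, for custom-bucket schemas, the custom bucket bounds.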
func writeHistogramChunkLayout(
b *bstream, schema int32, zeroThreshold float64,
positiveSpans, negativeSpans []histogram.Span, customValues []float64,
) {
putZeroThreshold(b, zeroThreshold)
putVarbitInt(b, int64(schema))
putHistogramChunkLayoutSpans(b, positiveSpans)
putHistogramChunkLayoutSpans(b, negativeSpans)
if histogram.IsCustomBucketsSchema(schema) {
putHistogramChunkLayoutCustomBounds(b, customValues)
}
}
func readHistogramChunkLayout(b *bstreamReader) (
schema int32, zeroThreshold float64,
positiveSpans, negativeSpans []histogram.Span,
customValues []float64,
err error,
) {
zeroThreshold, err = readZeroThreshold(b)
if err != nil {
return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
v, err := readVarbitInt(b)
if err != nil {
return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
schema = int32(v)
positiveSpans, err = readHistogramChunkLayoutSpans(b)
if err != nil {
return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
negativeSpans, err = readHistogramChunkLayoutSpans(b)
if err != nil {
return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
if histogram.IsCustomBucketsSchema(schema) {
customValues, err = readHistogramChunkLayoutCustomBounds(b)
if err != nil {
return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
}
return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
func putHistogramChunkLayoutSpans(b *bstream, spans []histogram.Span) {
putVarbitUint(b, uint64(len(spans)))
for _, s := range spans {
putVarbitUint(b, uint64(s.Length))
putVarbitInt(b, int64(s.Offset))
}
}
func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
var spans []histogram.Span
num, err := readVarbitUint(b)
if err != nil {
return nil, err
}
for i := 0; i < int(num); i++ {
length, err := readVarbitUint(b)
if err != nil {
return nil, err
}
offset, err := readVarbitInt(b)
if err != nil {
return nil, err
}
spans = append(spans, histogram.Span{
Length: uint32(length),
Offset: int32(offset),
})
}
return spans, nil
}
func putHistogramChunkLayoutCustomBounds(b *bstream, customValues []float64) {
putVarbitUint(b, uint64(len(customValues)))
for _, bound := range customValues {
putCustomBound(b, bound)
}
}
func readHistogramChunkLayoutCustomBounds(b *bstreamReader) ([]float64, error) {
var customValues []float64
num, err := readVarbitUint(b)
if err != nil {
return nil, err
}
for i := 0; i < int(num); i++ {
bound, err := readCustomBound(b)
if err != nil {
return nil, err
}
customValues = append(customValues, bound)
}
return customValues, nil
}
// putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail:
// - If the threshold is 0, store a single zero byte.
// - If the threshold is a power of 2 between (and including) 2^-243 and 2^10,
// take the exponent from the IEEE 754 representation of the threshold, which
// covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242
// in IEEE 754 representation, and 2^10 is 0.5*2^11.) Add 243 to the exponent
// and store the result (which will be between 1 and 254) as a single
// byte. Note that small powers of two are preferred values for the zero
// threshold. The default value for the zero threshold is 2^-128 (or
// 0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a
// single byte (with value 116).
// - In all other cases, store 255 as a single byte, followed by the 8 bytes of
// the threshold as a float64, i.e. taking 9 bytes in total.
func putZeroThreshold(b *bstream, threshold float64) {
if threshold == 0 {
b.writeByte(0)
return
}
frac, exp := math.Frexp(threshold)
if frac != 0.5 || exp < -242 || exp > 11 {
b.writeByte(255)
b.writeBits(math.Float64bits(threshold), 64)
return
}
b.writeByte(byte(exp + 243))
}
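// To make the scheme concrete, here is an illustrative round-trip sketch
// (not part of the original file): the default threshold 2^-128 is
// 0.5*2^-127 under math.Frexp, so it is stored as the single byte
// -127+243 = 116, while a non-power-of-two such as 0.01 takes the 9-byte
// fallback.
//
//	var b bstream
//	putZeroThreshold(&b, math.Ldexp(0.5, -127)) // Single byte with value 116.
//	putZeroThreshold(&b, 0.01)                  // Byte 255 plus 8 float64 bytes.
//	br := newBReader(b.bytes())
//	v1, _ := readZeroThreshold(&br)             // 2^-128 again.
//	v2, _ := readZeroThreshold(&br)             // 0.01 again.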
// readZeroThreshold reads the zero threshold written with putZeroThreshold.
func readZeroThreshold(br *bstreamReader) (float64, error) {
b, err := br.ReadByte()
if err != nil {
return 0, err
}
switch b {
case 0:
return 0, nil
case 255:
v, err := br.readBits(64)
if err != nil {
return 0, err
}
return math.Float64frombits(v), nil
default:
return math.Ldexp(0.5, int(b)-243), nil
}
}
// isWholeWhenMultiplied reports whether the number, when multiplied by 1000,
// can be converted into an integer without losing precision.
func isWholeWhenMultiplied(in float64) bool {
i := uint(math.Round(in * 1000))
out := float64(i) / 1000
return in == out
}
// putCustomBound writes a custom bound to the bstream. It stores values from
// 0 to 33554.430 (inclusive) that are multiples of 0.001 in unsigned varbit
// encoding of up to 4 bytes, but needs 1 bit + 8 bytes for other values like
// negative numbers, numbers greater than 33554.430, or numbers that are not
// a multiple of 0.001, on the assumption that they are less common. In detail:
// - Multiply the bound by 1000, without rounding.
// - If the multiplied bound is >= 0, <= 33554430 and a whole number,
// add 1 and store it in unsigned varbit encoding. All these numbers are
// greater than 0, so the leading bit of the varbit is always 1!
// - Otherwise, store a 0 bit, followed by the 8 bytes of the original
// bound as a float64.
//
// When reading the values, we can first decode a value as unsigned varbit,
// if it's 0, then we read the next 8 bytes as a float64, otherwise
// we can convert the value to a float64 by subtracting 1 and dividing by 1000.
func putCustomBound(b *bstream, f float64) {
tf := f * 1000
// The limit 33554430 is 33554431-1: 33554431 is the maximum value that
// fits into a 4-byte varbit, and we store tf+1. Other values are stored
// in 8 bytes anyway.
if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) {
b.writeBit(zero)
b.writeBits(math.Float64bits(f), 64)
return
}
putVarbitUint(b, uint64(math.Round(tf))+1)
}
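// Worked example (illustrative, not part of the original code; b is assumed
// to be a *bstream): round-tripping bounds through putCustomBound and
// readCustomBound.
//
//	putCustomBound(b, 2.5)    // Stores the varbit uint 2501 (2.5*1000 + 1); read back as (2501-1)/1000 = 2.5.
//	putCustomBound(b, -1)     // Negative: stores a 0 bit plus the raw float64 bits.
//	putCustomBound(b, 0.0005) // Not a multiple of 0.001: stores a 0 bit plus the raw float64 bits.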
// readCustomBound reads the custom bound written with putCustomBound.
func readCustomBound(br *bstreamReader) (float64, error) {
b, err := readVarbitUint(br)
if err != nil {
return 0, err
}
switch b {
case 0:
v, err := br.readBits(64)
if err != nil {
return 0, err
}
return math.Float64frombits(v), nil
default:
return float64(b-1) / 1000, nil
}
}
type bucketIterator struct {
spans []histogram.Span
span int // Span position of last yielded bucket.
bucket int // Bucket position within span of last yielded bucket.
idx int // Bucket index (globally across all spans) of last yielded bucket.
}
func newBucketIterator(spans []histogram.Span) *bucketIterator {
b := bucketIterator{
spans: spans,
span: 0,
bucket: -1,
idx: -1,
}
if len(spans) > 0 {
b.idx += int(spans[0].Offset)
}
return &b
}
func (b *bucketIterator) Next() (int, bool) {
// We're already out of bounds.
if b.span >= len(b.spans) {
return 0, false
}
if b.bucket < int(b.spans[b.span].Length)-1 { // Try to move within same span.
b.bucket++
b.idx++
return b.idx, true
}
for b.span < len(b.spans)-1 { // Try to move from one span to the next.
b.span++
b.idx += int(b.spans[b.span].Offset + 1)
b.bucket = 0
if b.spans[b.span].Length == 0 {
b.idx--
continue
}
return b.idx, true
}
// We're out of options.
return 0, false
}
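// Worked example (illustrative, not part of the original code): iterating the
// absolute bucket indices described by a span layout.
//
//	it := newBucketIterator([]histogram.Span{{Offset: 0, Length: 2}, {Offset: 2, Length: 1}})
//	// Successive it.Next() calls yield (0, true), (1, true), (4, true), (0, false):
//	// the second span starts 2 bucket indices after the first span ends.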
// An Insert describes how many new buckets have to be inserted before
// processing the pos'th bucket from the original slice.
type Insert struct {
pos int
num int
// Optional: bucketIdx is the index of the bucket that is inserted.
// Can be used to adjust spans.
bucketIdx int
}
// expandSpansBothWays is similar to expandFloatSpansAndBuckets and
// expandIntSpansAndBuckets, but now b may also cover an entirely different set
// of buckets and counter resets are ignored. The function returns the “forward”
// inserts to expand 'a' to also cover all the buckets exclusively covered by
// 'b', and it returns the “backward” inserts to expand 'b' to also cover all
// the buckets exclusively covered by 'a'.
func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var fInserts, bInserts []Insert
var lastBucket int
addBucket := func(b int) {
offset := b - lastBucket - 1
if offset == 0 && len(mergedSpans) > 0 {
mergedSpans[len(mergedSpans)-1].Length++
} else {
if len(mergedSpans) == 0 {
offset++
}
mergedSpans = append(mergedSpans, histogram.Span{
Offset: int32(offset),
Length: 1,
})
}
lastBucket = b
}
// When fInter.num (or bInter.num, respectively) becomes > 0, this
// becomes a valid insert that should be yielded when we finish a streak
// of new buckets.
var fInter, bInter Insert
av, aOK := ai.Next()
bv, bOK := bi.Next()
loop:
for {
switch {
case aOK && bOK:
switch {
case av == bv: // Both have an identical value. Move on!
// Finish WIP insert and reset.
if fInter.num > 0 {
fInserts = append(fInserts, fInter)
fInter.num = 0
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
addBucket(av)
av, aOK = ai.Next()
bv, bOK = bi.Next()
fInter.pos++
bInter.pos++
case av < bv: // b misses a value that is in a.
bInter.num++
// Collect the forward inserts before advancing
// the position of 'a'.
if fInter.num > 0 {
fInserts = append(fInserts, fInter)
fInter.num = 0
}
addBucket(av)
fInter.pos++
av, aOK = ai.Next()
case av > bv: // a misses a value that is in b. Forward b and recompare.
fInter.num++
// Collect the backward inserts before advancing the
// position of 'b'.
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
addBucket(bv)
bInter.pos++
bv, bOK = bi.Next()
}
case aOK && !bOK: // b misses a value that is in a.
bInter.num++
addBucket(av)
av, aOK = ai.Next()
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
fInter.num++
addBucket(bv)
bv, bOK = bi.Next()
default: // Both iterators ran out. We're done.
if fInter.num > 0 {
fInserts = append(fInserts, fInter)
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
}
break loop
}
}
return fInserts, bInserts, mergedSpans
}
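// Worked example (illustrative, not part of the original code):
//
//	a := []histogram.Span{{Offset: 0, Length: 2}} // Covers buckets 0, 1.
//	b := []histogram.Span{{Offset: 1, Length: 2}} // Covers buckets 1, 2.
//	fw, bw, merged := expandSpansBothWays(a, b)
//	// fw is [{pos: 2, num: 1}]: one bucket must be appended to 'a' to cover bucket 2.
//	// bw is [{pos: 0, num: 1}]: one bucket must be prepended to 'b' to cover bucket 0.
//	// merged is [{Offset: 0, Length: 3}]: buckets 0, 1, 2.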
type bucketValue interface {
int64 | float64
}
// insert merges 'in' with the provided inserts and writes them into 'out',
// which must already have the appropriate length. 'out' is also returned for
// convenience.
func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV {
var (
oi int // Position in out.
v BV // The last value seen.
ii int // The next insert to process.
)
for i, d := range in {
if ii >= len(inserts) || i != inserts[ii].pos {
// No inserts at this position, the original delta is still valid.
out[oi] = d
oi++
v += d
continue
}
// Process inserts.
firstInsert := true
for ii < len(inserts) && i == inserts[ii].pos {
// We have an insert!
// Add insert.num new delta values such that their
// bucket values equal 0. When deltas==false, the
// values are absolute rather than deltas, so we set
// them to 0 directly.
if deltas && firstInsert {
out[oi] = -v
firstInsert = false // No need to go to 0 in further inserts.
} else {
out[oi] = 0
}
oi++
for x := 1; x < inserts[ii].num; x++ {
out[oi] = 0
oi++
}
ii++
}
// Now save the value from the input. The delta value we
// should save is the original delta value + the last
// value of the point before the insert (to undo the
// delta that was introduced by the insert). When
// deltas==false, it means that it is an absolute value,
// so we set it directly to the value in the 'in' slice.
if deltas {
out[oi] = d + v
} else {
out[oi] = d
}
oi++
v += d
}
// Insert empty buckets at the end.
for ii < len(inserts) {
if inserts[ii].pos < len(in) {
panic("leftover inserts must be after the current buckets")
}
// Add insert.num new delta values such that their
// bucket values equal 0. When deltas==false, the
// values are absolute rather than deltas, so we set
// them to 0 directly.
if deltas {
out[oi] = -v
} else {
out[oi] = 0
}
oi++
for x := 1; x < inserts[ii].num; x++ {
out[oi] = 0
oi++
}
ii++
v = 0
}
return out
}
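// Worked example (illustrative, not part of the original code): inserting one
// empty bucket into a delta-encoded bucket slice.
//
//	in := []int64{6, -3} // Decodes to the bucket values 6, 3.
//	out := make([]int64, 3)
//	insert(in, out, []Insert{{pos: 1, num: 1}}, true)
//	// out is now [6, -6, 3], which decodes to the bucket values 6, 0, 3:
//	// the inserted bucket decodes to 0, and the following delta is re-based.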
// counterResetHint returns a CounterResetHint based on the CounterResetHeader
// and on the position within the chunk.
func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterResetHint {
switch {
case crh == GaugeType:
// A gauge histogram chunk only contains gauge histograms.
return histogram.GaugeType
case numRead > 1:
// In a counter histogram chunk, there will not be any counter
// resets after the first histogram.
return histogram.NotCounterReset
default:
// Sadly, we have to return "unknown" as the hint for all other
// cases, even if we know that the chunk was started with or without a
// counter reset. But we cannot be sure that the previous chunk
// still exists in the TSDB, or whether it was added later by
// out-of-order ingestion or backfill, so we conservatively return "unknown".
//
// TODO: If we can detect whether the previous and current chunk are
// actually consecutive then we could trust its hint:
// https://github.com/prometheus/prometheus/issues/15346.
return histogram.UnknownCounterReset
}
}
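// For example (illustrative, not part of the original code): with a
// CounterReset chunk header, the first histogram read yields
// UnknownCounterReset (numRead == 1, and the header cannot be trusted across
// chunk boundaries), while every subsequent histogram yields NotCounterReset.
// A GaugeType header always yields GaugeType.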
// adjustForInserts adjusts the spans for the given inserts.
func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []histogram.Span) {
if len(inserts) == 0 {
return spans
}
it := newBucketIterator(spans)
var (
lastBucket int
i int
insertIdx = inserts[i].bucketIdx
insertNum = inserts[i].num
)
addBucket := func(b int) {
offset := b - lastBucket - 1
if offset == 0 && len(mergedSpans) > 0 {
mergedSpans[len(mergedSpans)-1].Length++
} else {
if len(mergedSpans) == 0 {
offset++
}
mergedSpans = append(mergedSpans, histogram.Span{
Offset: int32(offset),
Length: 1,
})
}
lastBucket = b
}
consumeInsert := func() {
// Consume the insert.
insertNum--
if insertNum == 0 {
i++
if i < len(inserts) {
insertIdx = inserts[i].bucketIdx
insertNum = inserts[i].num
}
} else {
insertIdx++
}
}
bucket, ok := it.Next()
for ok {
if i < len(inserts) && insertIdx < bucket {
addBucket(insertIdx)
consumeInsert()
} else {
addBucket(bucket)
bucket, ok = it.Next()
}
}
for i < len(inserts) {
addBucket(insertIdx)
consumeInsert()
}
return mergedSpans
}
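// Worked example (illustrative, not part of the original code):
//
//	spans := []histogram.Span{{Offset: 0, Length: 2}} // Covers buckets 0, 1.
//	inserts := []Insert{{num: 1, bucketIdx: 3}}       // One new bucket at index 3.
//	adjustForInserts(spans, inserts)
//	// Result: [{Offset: 0, Length: 2}, {Offset: 1, Length: 1}], covering buckets 0, 1 and 3.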
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/chunk.go | tsdb/chunkenc/chunk.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"fmt"
"math"
"sync"
"github.com/prometheus/prometheus/model/histogram"
)
// Encoding is the identifier for a chunk encoding.
type Encoding uint8
// The different available chunk encodings.
const (
EncNone Encoding = iota
EncXOR
EncHistogram
EncFloatHistogram
)
func (e Encoding) String() string {
switch e {
case EncNone:
return "none"
case EncXOR:
return "XOR"
case EncHistogram:
return "histogram"
case EncFloatHistogram:
return "floathistogram"
}
return "<unknown>"
}
// IsValidEncoding returns true for supported encodings.
func IsValidEncoding(e Encoding) bool {
return e == EncXOR || e == EncHistogram || e == EncFloatHistogram
}
const (
// MaxBytesPerXORChunk is the maximum size an XOR chunk can be.
MaxBytesPerXORChunk = 1024
// TargetBytesPerHistogramChunk sets a size target for each histogram chunk.
TargetBytesPerHistogramChunk = 1024
// MinSamplesPerHistogramChunk sets a minimum sample count for histogram chunks. This is desirable because a single
// histogram sample can be larger than TargetBytesPerHistogramChunk, but we want to avoid chunks with too few
// samples so that we can still achieve some measure of compression advantage, even while dealing with really large
// histograms.
// Note that this minimum sample count is not enforced across chunk range boundaries (for example, if the chunk
// range is 100 and the first sample in the chunk range is 99, the next sample will be included in a new chunk
// resulting in the old chunk containing only a single sample).
MinSamplesPerHistogramChunk = 10
)
// Chunk holds a sequence of sample pairs that can be iterated over and appended to.
type Chunk interface {
Iterable
// Bytes returns the underlying byte slice of the chunk.
Bytes() []byte
// Encoding returns the encoding type of the chunk.
Encoding() Encoding
// Appender returns an appender to append samples to the chunk.
Appender() (Appender, error)
// NumSamples returns the number of samples in the chunk.
NumSamples() int
// Compact is called whenever a chunk is expected to be complete (no more
// samples appended) and the underlying implementation can eventually
// optimize the chunk.
// There's no strong guarantee that no samples will be appended once
// Compact() is called. Implementing this function is optional.
Compact()
// Reset resets the chunk with the given stream.
Reset(stream []byte)
}
type Iterable interface {
// The iterator passed as argument is for re-use.
// Depending on implementation, the iterator can
// be re-used or a new iterator can be allocated.
Iterator(Iterator) Iterator
}
// Appender adds sample pairs to a chunk.
type Appender interface {
Append(int64, float64)
// AppendHistogram and AppendFloatHistogram append a histogram sample to a histogram or float histogram chunk.
// Appending a histogram may require creating a completely new chunk or recoding (changing) the current chunk.
// The Appender prev is used to determine if there is a counter reset between the previous Appender and the current Appender.
// The Appender prev is optional and only taken into account when the first sample is being appended.
// The bool appendOnly governs what happens when a sample cannot be appended to the current chunk. If appendOnly is true, then
// in such case an error is returned without modifying the chunk. If appendOnly is false, then a new chunk is created or the
// current chunk is recoded to accommodate the sample.
// The returned Chunk c is nil if the sample could be appended to the current Chunk, otherwise c is the new Chunk.
// The returned bool isRecoded can be used to distinguish between the new Chunk c being a completely new Chunk
// or the current Chunk recoded to a new Chunk.
// The Appender app that can be used for the next append is always returned.
AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
}
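// A minimal usage sketch for AppendHistogram (illustrative, not part of the
// original code; t, h and app are assumed to be in scope). AppendFloatHistogram
// behaves analogously:
//
//	newChk, isRecoded, app, err := app.AppendHistogram(nil, t, h, false)
//	switch {
//	case err != nil:
//		// The sample could not be appended (only relevant with appendOnly=true).
//	case newChk == nil:
//		// Appended to the current chunk; continue appending via app.
//	case isRecoded:
//		// newChk is the current chunk recoded to a new layout; it replaces the
//		// old chunk, and appending continues via the returned app.
//	default:
//		// newChk is a completely new chunk cut for this sample.
//	}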
// Iterator is a simple iterator that can only get the next value.
// Iterator iterates over the samples of a time series, in timestamp-increasing order.
type Iterator interface {
// Next advances the iterator by one and returns the type of the value
// at the new position (or ValNone if the iterator is exhausted).
Next() ValueType
// Seek advances the iterator forward to the first sample with a
// timestamp equal or greater than t. If the current sample found by a
// previous `Next` or `Seek` operation already has this property, Seek
// has no effect. If a sample has been found, Seek returns the type of
// its value. Otherwise, it returns ValNone, after which the iterator is
// exhausted.
Seek(t int64) ValueType
// At returns the current timestamp/value pair if the value is a float.
// Before the iterator has advanced, the behaviour is unspecified.
At() (int64, float64)
// AtHistogram returns the current timestamp/value pair if the value is a
// histogram with integer counts. Before the iterator has advanced, the behaviour
// is unspecified.
// The method accepts an optional Histogram object which will be
// reused when not nil. Otherwise, a new Histogram object will be allocated.
AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram)
// AtFloatHistogram returns the current timestamp/value pair if the
// value is a histogram with floating-point counts. It also works if the
// value is a histogram with integer counts, in which case a
// FloatHistogram copy of the histogram is returned. Before the iterator
// has advanced, the behaviour is unspecified.
// The method accepts an optional FloatHistogram object which will be
// reused when not nil. Otherwise, a new FloatHistogram object will be allocated.
AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram)
// AtT returns the current timestamp.
// Before the iterator has advanced, the behaviour is unspecified.
AtT() int64
// Err returns the current error. It should be used only after the
// iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone.
Err() error
}
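// A minimal consumption sketch (illustrative, not part of the original code;
// it is assumed to be an Iterator):
//
//	for vt := it.Next(); vt != ValNone; vt = it.Next() {
//		switch vt {
//		case ValFloat:
//			t, v := it.At()
//			_, _ = t, v
//		case ValHistogram:
//			t, h := it.AtHistogram(nil) // Pass a *histogram.Histogram to reuse memory.
//			_, _ = t, h
//		case ValFloatHistogram:
//			t, fh := it.AtFloatHistogram(nil)
//			_, _ = t, fh
//		}
//	}
//	if err := it.Err(); err != nil {
//		// Handle the decoding error.
//	}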
// ValueType defines the type of a value an Iterator points to.
type ValueType uint8
// Possible values for ValueType.
const (
ValNone ValueType = iota // No value at the current position.
ValFloat // A simple float, retrieved with At.
ValHistogram // A histogram, retrieved with AtHistogram, but AtFloatHistogram works, too.
ValFloatHistogram // A floating-point histogram, retrieved with AtFloatHistogram.
)
func (v ValueType) String() string {
switch v {
case ValNone:
return "none"
case ValFloat:
return "float"
case ValHistogram:
return "histogram"
case ValFloatHistogram:
return "floathistogram"
default:
return "unknown"
}
}
func (v ValueType) ChunkEncoding() Encoding {
switch v {
case ValFloat:
return EncXOR
case ValHistogram:
return EncHistogram
case ValFloatHistogram:
return EncFloatHistogram
default:
return EncNone
}
}
func (v ValueType) NewChunk() (Chunk, error) {
switch v {
case ValFloat:
return NewXORChunk(), nil
case ValHistogram:
return NewHistogramChunk(), nil
case ValFloatHistogram:
return NewFloatHistogramChunk(), nil
default:
return nil, fmt.Errorf("value type %v unsupported", v)
}
}
// MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values.
func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
return &mockSeriesIterator{
timeStamps: timestamps,
values: values,
currIndex: -1,
}
}
type mockSeriesIterator struct {
timeStamps []int64
values []float64
currIndex int
}
func (*mockSeriesIterator) Seek(int64) ValueType { return ValNone }
func (it *mockSeriesIterator) At() (int64, float64) {
return it.timeStamps[it.currIndex], it.values[it.currIndex]
}
func (*mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
return math.MinInt64, nil
}
func (*mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
return math.MinInt64, nil
}
func (it *mockSeriesIterator) AtT() int64 {
return it.timeStamps[it.currIndex]
}
func (it *mockSeriesIterator) Next() ValueType {
if it.currIndex < len(it.timeStamps)-1 {
it.currIndex++
return ValFloat
}
return ValNone
}
func (*mockSeriesIterator) Err() error { return nil }
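// Usage sketch (illustrative, not part of the original code):
//
//	it := MockSeriesIterator([]int64{1, 2, 3}, []float64{10, 20, 30})
//	for it.Next() == ValFloat {
//		t, v := it.At() // Yields (1, 10), (2, 20), (3, 30).
//		_, _ = t, v
//	}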
// NewNopIterator returns a new chunk iterator that does not hold any data.
func NewNopIterator() Iterator {
return nopIterator{}
}
type nopIterator struct{}
func (nopIterator) Next() ValueType { return ValNone }
func (nopIterator) Seek(int64) ValueType { return ValNone }
func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 }
func (nopIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
return math.MinInt64, nil
}
func (nopIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
return math.MinInt64, nil
}
func (nopIterator) AtT() int64 { return math.MinInt64 }
func (nopIterator) Err() error { return nil }
// Pool is used to create and reuse chunk references to avoid allocations.
type Pool interface {
Put(Chunk) error
Get(e Encoding, b []byte) (Chunk, error)
}
// pool is a memory pool of chunk objects.
type pool struct {
xor sync.Pool
histogram sync.Pool
floatHistogram sync.Pool
}
// NewPool returns a new pool.
func NewPool() Pool {
return &pool{
xor: sync.Pool{
New: func() any {
return &XORChunk{b: bstream{}}
},
},
histogram: sync.Pool{
New: func() any {
return &HistogramChunk{b: bstream{}}
},
},
floatHistogram: sync.Pool{
New: func() any {
return &FloatHistogramChunk{b: bstream{}}
},
},
}
}
func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
var c Chunk
switch e {
case EncXOR:
c = p.xor.Get().(*XORChunk)
case EncHistogram:
c = p.histogram.Get().(*HistogramChunk)
case EncFloatHistogram:
c = p.floatHistogram.Get().(*FloatHistogramChunk)
default:
return nil, fmt.Errorf("invalid chunk encoding %q", e)
}
c.Reset(b)
return c, nil
}
func (p *pool) Put(c Chunk) error {
var sp *sync.Pool
var ok bool
switch c.Encoding() {
case EncXOR:
_, ok = c.(*XORChunk)
sp = &p.xor
case EncHistogram:
_, ok = c.(*HistogramChunk)
sp = &p.histogram
case EncFloatHistogram:
_, ok = c.(*FloatHistogramChunk)
sp = &p.floatHistogram
default:
return fmt.Errorf("invalid chunk encoding %q", c.Encoding())
}
if !ok {
// This may happen often with wrapped chunks. There is nothing we can really
// do about it, and returning an error would cause a lot of allocations
// again. Thus, we just skip it.
return nil
}
c.Reset(nil)
sp.Put(c)
return nil
}
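// Usage sketch (illustrative, not part of the original code; raw is assumed to
// hold encoded chunk bytes, e.g. read from disk): chunks obtained from the
// pool should be returned to it once they are no longer referenced.
//
//	p := NewPool()
//	chk, err := p.Get(EncXOR, raw)
//	if err != nil {
//		// Unknown encoding.
//	}
//	// ... iterate over chk ...
//	_ = p.Put(chk) // Resets the chunk and makes it available for reuse.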
// FromData returns a chunk from a byte slice of chunk data.
// This exists so that users of the library can easily create chunks from
// bytes.
func FromData(e Encoding, d []byte) (Chunk, error) {
switch e {
case EncXOR:
return &XORChunk{b: bstream{count: 0, stream: d}}, nil
case EncHistogram:
return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
case EncFloatHistogram:
return &FloatHistogramChunk{b: bstream{count: 0, stream: d}}, nil
}
return nil, fmt.Errorf("invalid chunk encoding %q", e)
}
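// Usage sketch (illustrative, not part of the original code; raw is assumed to
// hold encoded chunk bytes, including the header bytes):
//
//	chk, err := FromData(EncXOR, raw)
//	if err != nil {
//		// Unknown encoding.
//	}
//	it := chk.Iterator(nil)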
// NewEmptyChunk returns an empty chunk for the given encoding.
func NewEmptyChunk(e Encoding) (Chunk, error) {
switch e {
case EncXOR:
return NewXORChunk(), nil
case EncHistogram:
return NewHistogramChunk(), nil
case EncFloatHistogram:
return NewFloatHistogramChunk(), nil
}
return nil, fmt.Errorf("invalid chunk encoding %q", e)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/histogram.go | tsdb/chunkenc/histogram.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/value"
)
// HistogramChunk holds encoded sample data for a sparse, high-resolution
// histogram.
//
// Each sample has multiple "fields", stored in the following way (raw = store
// number directly, delta = store delta to the previous number, dod = store
// delta of the delta to the previous number, xor = what we do for regular
// sample values):
//
//	field →     ts      count   zeroCount   sum   []posbuckets   []negbuckets
//	sample 1    raw     raw     raw         raw   []raw          []raw
//	sample 2    delta   delta   delta       xor   []delta        []delta
//	sample >2   dod     dod     dod         xor   []dod          []dod
type HistogramChunk struct {
b bstream
}
// NewHistogramChunk returns a new chunk with histogram encoding.
func NewHistogramChunk() *HistogramChunk {
b := make([]byte, histogramHeaderSize, chunkAllocationSize)
return &HistogramChunk{b: bstream{stream: b, count: 0}}
}
func (c *HistogramChunk) Reset(stream []byte) {
c.b.Reset(stream)
}
// Encoding returns the encoding type.
func (*HistogramChunk) Encoding() Encoding {
return EncHistogram
}
// Bytes returns the underlying byte slice of the chunk.
func (c *HistogramChunk) Bytes() []byte {
return c.b.bytes()
}
// NumSamples returns the number of samples in the chunk.
func (c *HistogramChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
// CounterResetHeader defines the first 2 bits of the chunk header.
type CounterResetHeader byte
const (
// CounterReset means there was definitely a counter reset that resulted in this chunk.
CounterReset CounterResetHeader = 0b10000000
// NotCounterReset means there was definitely no counter reset when cutting this chunk.
NotCounterReset CounterResetHeader = 0b01000000
// GaugeType means this chunk contains a gauge histogram, where counter resets do not happen.
GaugeType CounterResetHeader = 0b11000000
// UnknownCounterReset means we cannot say if this chunk was created due to a counter reset or not.
// An explicit counter reset detection needs to happen during query time.
UnknownCounterReset CounterResetHeader = 0b00000000
// CounterResetHeaderMask is the mask to get the counter reset header bits.
CounterResetHeaderMask byte = 0b11000000
// Position within the header bytes at the start of the stream.
histogramFlagPos = 2
// Total header size.
histogramHeaderSize = 3
)
// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(c.Bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
// Compact implements the Chunk interface.
func (c *HistogramChunk) Compact() {
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
buf := make([]byte, l)
copy(buf, c.b.stream)
c.b.stream = buf
}
}
// Appender implements the Chunk interface.
func (c *HistogramChunk) Appender() (Appender, error) {
if len(c.b.stream) == histogramHeaderSize { // Avoid allocating an Iterator when chunk is empty.
return &HistogramAppender{b: &c.b, t: math.MinInt64, leading: 0xff}, nil
}
it := c.iterator(nil)
// To get an appender, we must know the state it would have if we had
// appended all existing data from scratch. We iterate through to the
// end and populate the appender via the iterator's state.
for it.Next() == ValHistogram {
}
if err := it.Err(); err != nil {
return nil, err
}
a := &HistogramAppender{
b: &c.b,
schema: it.schema,
zThreshold: it.zThreshold,
pSpans: it.pSpans,
nSpans: it.nSpans,
customValues: it.customValues,
t: it.t,
cnt: it.cnt,
zCnt: it.zCnt,
tDelta: it.tDelta,
cntDelta: it.cntDelta,
zCntDelta: it.zCntDelta,
pBuckets: it.pBuckets,
nBuckets: it.nBuckets,
pBucketsDelta: it.pBucketsDelta,
nBucketsDelta: it.nBucketsDelta,
sum: it.sum,
leading: it.leading,
trailing: it.trailing,
}
return a, nil
}
func countSpans(spans []histogram.Span) int {
var cnt int
for _, s := range spans {
cnt += int(s.Length)
}
return cnt
}
func newHistogramIterator(b []byte) *histogramIterator {
it := &histogramIterator{
br: newBReader(b[histogramHeaderSize:]),
numTotal: binary.BigEndian.Uint16(b),
t: math.MinInt64,
}
it.counterResetHeader = CounterResetHeader(b[histogramFlagPos] & CounterResetHeaderMask)
return it
}
func (c *HistogramChunk) iterator(it Iterator) *histogramIterator {
// This comment is copied from XORChunk.iterator:
// Should iterators guarantee to act on a copy of the data so it doesn't lock append?
// When using striped locks to guard access to chunks, probably yes.
// Could only copy data if the chunk is not completed yet.
if histogramIter, ok := it.(*histogramIterator); ok {
histogramIter.Reset(c.b.bytes())
return histogramIter
}
return newHistogramIterator(c.b.bytes())
}
// Iterator implements the Chunk interface.
func (c *HistogramChunk) Iterator(it Iterator) Iterator {
return c.iterator(it)
}
// HistogramAppender is an Appender implementation for sparse histograms.
type HistogramAppender struct {
b *bstream
// Layout:
schema int32
zThreshold float64
pSpans, nSpans []histogram.Span
// customValues is read-only after the first sample is appended.
customValues []float64
// Although we intend to start new chunks on counter resets, we still
// have to handle negative deltas for gauge histograms. Therefore, all
// deltas are signed types here (including tDelta, so that it is not
// treated specially).
t int64
cnt, zCnt uint64
tDelta, cntDelta, zCntDelta int64
pBuckets, nBuckets []int64
pBucketsDelta, nBucketsDelta []int64
// The sum is Gorilla xor encoded.
sum float64
leading uint8
trailing uint8
}
func (a *HistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
func (a *HistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
a.b.bytes()[histogramFlagPos] = (a.b.bytes()[histogramFlagPos] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
}
func (a *HistogramAppender) NumSamples() int {
return int(binary.BigEndian.Uint16(a.b.bytes()))
}
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (*HistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
// appendable returns whether the chunk can be appended to, and if so whether
// 1. Any recoding needs to happen to the chunk using the provided forward
// inserts (in case of any new buckets, positive or negative range,
// respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared, unless the bucket count was 0 (unused).
// Empty buckets can happen if the chunk was recoded and we're merging a
// non-recoded histogram. In this case backward inserts will be provided.
// - There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
//
// If the given sample is stale, it is always ok to append. If the returned
// counterResetHint is CounterReset, okToAppend is always false.
//
// The method returns an additional CounterResetHeader value that indicates the
// status of the counter reset detection. But it returns UnknownCounterReset
// when schema or zero threshold changed, because we don't do a full counter
// reset detection.
func (a *HistogramAppender) appendable(h *histogram.Histogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
okToAppend bool, counterResetHint CounterResetHeader,
) {
counterResetHint = NotCounterReset
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.CounterResetHint == histogram.CounterReset {
// Always honor the explicit counter reset hint.
counterResetHint = CounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if value.IsStaleNaN(a.sum) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
counterResetHint = UnknownCounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.Count < a.cnt {
// There has been a counter reset.
counterResetHint = CounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
// This case might or might not go along with a counter reset and
// we do not want to invest the work of a full counter reset detection
// as long as https://github.com/prometheus/prometheus/issues/15346 is still open.
// TODO: consider adding the counter reset detection here once #15346 is fixed.
counterResetHint = UnknownCounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
counterResetHint = CounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.ZeroCount < a.zCnt {
// There has been a counter reset since ZeroThreshold didn't change.
counterResetHint = CounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
var ok bool
positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
if !ok {
counterResetHint = CounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
if !ok {
counterResetHint = CounterReset
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
// expandIntSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', otherwise the function will return false.
// The function also returns the inserts to expand 'b' to also cover all the
// buckets that are missing in 'b', but are present with 0 counter value in 'a'.
// The function also checks for counter resets between 'a' and 'b'.
//
// Example:
//
// Let's say the old buckets look like this:
//
//	span syntax: [offset, length]
//	spans      : [ 0 , 2 ]               [2,1]                   [ 3 , 2 ]                     [3,1]       [1,1]
//	bucket idx : [0]   [1]   2     3     [4]   5     6     7     [8]   [9]   10    11    12    [13]  14    [15]
//	raw values    6     3                 3                       2     4                       5           1
//	deltas        6    -3                 0                      -1     2                       1          -4
//
// But now we introduce a new bucket layout. (Carefully chosen example where we
// have a span appended, one unchanged[*], one prepended, and two merged - in
// that order.)
//
// [*] unchanged in terms of which bucket indices they represent, but to achieve
// that, their offset needs to change if "disrupted" by spans changing ahead of
// them.
//
//	                                     \/ this one is "unchanged"
//	spans      : [ 0 , 3 ]               [1,1]       [ 1 , 4 ]                                 [ 3 , 3 ]
//	bucket idx : [0]   [1]   [2]   3     [4]   5     [6]   [7]   [8]   [9]   10    11    12    [13]  [14]  [15]
//	raw values    6     3     0           3           0     0     2     4                       5     0     1
//	deltas        6    -3    -3           3          -3     0     2     2                       1    -5     1
//	delta mods:              /             \         /                 \                       /           \
//
// Note for histograms with delta-encoded buckets: Whenever any new buckets are
// introduced, the subsequent "old" bucket needs to readjust its delta to the
// new base of 0. Thus, for the caller who wants to transform the set of
// original deltas to a new set of deltas to match a new span layout that adds
// buckets, we simply need to generate a list of inserts.
//
// Note: Within expandIntSpansAndBuckets we don't have to worry about the changes to the
// spans themselves, thanks to the iterators we get to work with the more useful
// bucket indices (which of course directly correspond to the buckets we have to
// adjust).
func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) (forward, backward []Insert, ok bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
// be yielded when we finish a streak of new buckets.
var aInter Insert
var bInter Insert
aIdx, aOK := ai.Next()
bIdx, bOK := bi.Next()
// Bucket count. Initialize the absolute count and index into the
// positive/negative counts or deltas array. The bucket count is
// used to detect counter reset as well as unused buckets in a.
var (
aCount int64
bCount int64
aCountIdx int
bCountIdx int
)
if aOK {
aCount = aBuckets[aCountIdx]
}
if bOK {
bCount = bBuckets[bCountIdx]
}
// addInsert updates the current Insert with a new insert at the given
// bucket index (otherIdx).
addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert {
if insert.num == 0 {
// First insert.
insert.bucketIdx = otherIdx
} else if insert.bucketIdx+insert.num != otherIdx {
// Insert is not continuous from previous insert.
inserts = append(inserts, *insert)
insert.num = 0
insert.bucketIdx = otherIdx
}
insert.num++
return inserts
}
advanceA := func() {
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
aIdx, aOK = ai.Next()
aInter.pos++
aCountIdx++
if aOK {
aCount += aBuckets[aCountIdx]
}
}
advanceB := func() {
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
bIdx, bOK = bi.Next()
bInter.pos++
bCountIdx++
if bOK {
bCount += bBuckets[bCountIdx]
}
}
loop:
for {
switch {
case aOK && bOK:
switch {
case aIdx == bIdx: // Both have an identical bucket index.
// Bucket count. Check bucket for reset from a to b.
if aCount > bCount {
return nil, nil, false
}
advanceA()
advanceB()
continue
case aIdx < bIdx: // b misses a bucket index that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInserts = addInsert(bInserts, &bInter, aIdx)
advanceA()
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
aInserts = addInsert(aInserts, &aInter, bIdx)
advanceB()
}
case aOK && !bOK: // b misses a value that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInserts = addInsert(bInserts, &bInter, aIdx)
advanceA()
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
aInserts = addInsert(aInserts, &aInter, bIdx)
advanceB()
default: // Both iterators ran out. We're done.
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
}
break loop
}
}
return aInserts, bInserts, true
}
// appendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided forward
// inserts (in case of any new buckets, positive or negative range,
// respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// This method must be only used for gauge histograms.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(a.sum) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
// appendHistogram appends a histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
// appendable() and act accordingly!
func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
var tDelta, cntDelta, zCntDelta int64
num := binary.BigEndian.Uint16(a.b.bytes())
if value.IsStaleNaN(h.Sum) {
// Empty out the other fields to write no buckets, and an empty
// layout in case of the first histogram in the chunk.
h = &histogram.Histogram{Sum: h.Sum}
}
if num == 0 {
// The first append gets the privilege to dictate the layout
// but it's also responsible for encoding it into the chunk!
writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
a.schema = h.Schema
a.zThreshold = h.ZeroThreshold
if len(h.PositiveSpans) > 0 {
a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
copy(a.pSpans, h.PositiveSpans)
} else {
a.pSpans = nil
}
if len(h.NegativeSpans) > 0 {
a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
copy(a.nSpans, h.NegativeSpans)
} else {
a.nSpans = nil
}
if len(h.CustomValues) > 0 {
a.customValues = make([]float64, len(h.CustomValues))
copy(a.customValues, h.CustomValues)
} else {
a.customValues = nil
}
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
if numPBuckets > 0 {
a.pBuckets = make([]int64, numPBuckets)
a.pBucketsDelta = make([]int64, numPBuckets)
} else {
a.pBuckets = nil
a.pBucketsDelta = nil
}
if numNBuckets > 0 {
a.nBuckets = make([]int64, numNBuckets)
a.nBucketsDelta = make([]int64, numNBuckets)
} else {
a.nBuckets = nil
a.nBucketsDelta = nil
}
// Now store the actual data.
putVarbitInt(a.b, t)
putVarbitUint(a.b, h.Count)
putVarbitUint(a.b, h.ZeroCount)
a.b.writeBits(math.Float64bits(h.Sum), 64)
for _, b := range h.PositiveBuckets {
putVarbitInt(a.b, b)
}
for _, b := range h.NegativeBuckets {
putVarbitInt(a.b, b)
}
} else {
// The case for the 2nd sample with single deltas is implicitly
// handled correctly with the double delta code, so we don't
// need a separate single delta logic for the 2nd sample.
tDelta = t - a.t
cntDelta = int64(h.Count) - int64(a.cnt)
zCntDelta = int64(h.ZeroCount) - int64(a.zCnt)
tDod := tDelta - a.tDelta
cntDod := cntDelta - a.cntDelta
zCntDod := zCntDelta - a.zCntDelta
if value.IsStaleNaN(h.Sum) {
cntDod, zCntDod = 0, 0
}
putVarbitInt(a.b, tDod)
putVarbitInt(a.b, cntDod)
putVarbitInt(a.b, zCntDod)
a.writeSumDelta(h.Sum)
for i, b := range h.PositiveBuckets {
delta := b - a.pBuckets[i]
dod := delta - a.pBucketsDelta[i]
putVarbitInt(a.b, dod)
a.pBucketsDelta[i] = delta
}
for i, b := range h.NegativeBuckets {
delta := b - a.nBuckets[i]
dod := delta - a.nBucketsDelta[i]
putVarbitInt(a.b, dod)
a.nBucketsDelta[i] = delta
}
}
binary.BigEndian.PutUint16(a.b.bytes(), num+1)
a.t = t
a.cnt = h.Count
a.zCnt = h.ZeroCount
a.tDelta = tDelta
a.cntDelta = cntDelta
a.zCntDelta = zCntDelta
copy(a.pBuckets, h.PositiveBuckets)
copy(a.nBuckets, h.NegativeBuckets)
// Note that the bucket deltas were already updated above.
a.sum = h.Sum
}
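// Worked example (illustrative, not part of the original code) for the
// double-delta timestamp encoding above, appending at t = 1000, 1010, 1025:
//
//	1st sample: t stored directly:                1000
//	2nd sample: tDelta = 1010-1000 = 10, tDod = 10-0  = 10 (the previous tDelta is 0)
//	3rd sample: tDelta = 1025-1010 = 15, tDod = 15-10 = 5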
// recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *HistogramAppender) recode(
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
// TODO(beorn7): This currently just decodes everything and then encodes
// it again with the new span layout. This can probably be done in-place
// by editing the chunk. But let's first see how expensive it is in the
// big picture. Also, in-place editing might create concurrency issues.
byts := a.b.bytes()
it := newHistogramIterator(byts)
hc := NewHistogramChunk()
app, err := hc.Appender()
if err != nil {
panic(err) // This should never happen for an empty histogram chunk.
}
happ := app.(*HistogramAppender)
numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
for it.Next() == ValHistogram {
tOld, hOld := it.AtHistogram(nil)
// We have to newly allocate slices for the modified buckets
// here because they are kept by the appender until the next
// append.
// TODO(beorn7): We might be able to optimize this.
var positiveBuckets, negativeBuckets []int64
if numPositiveBuckets > 0 {
positiveBuckets = make([]int64, numPositiveBuckets)
}
if numNegativeBuckets > 0 {
negativeBuckets = make([]int64, numNegativeBuckets)
}
// Save the modified histogram to the new chunk.
hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
if len(positiveInserts) > 0 {
hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, true)
}
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, true)
}
happ.appendHistogram(tOld, hOld)
}
happ.setCounterResetHeader(CounterResetHeader(byts[histogramFlagPos] & CounterResetHeaderMask))
return hc, app
}
// recodeHistogram converts the current histogram (in-place) to accommodate an
// expansion of the set of (positive and/or negative) buckets used.
func (*HistogramAppender) recodeHistogram(
h *histogram.Histogram,
pBackwardInserts, nBackwardInserts []Insert,
) {
if len(pBackwardInserts) > 0 {
numPositiveBuckets := countSpans(h.PositiveSpans)
h.PositiveBuckets = insert(h.PositiveBuckets, make([]int64, numPositiveBuckets), pBackwardInserts, true)
}
if len(nBackwardInserts) > 0 {
numNegativeBuckets := countSpans(h.NegativeSpans)
h.NegativeBuckets = insert(h.NegativeBuckets, make([]int64, numNegativeBuckets), nBackwardInserts, true)
}
}
func (a *HistogramAppender) writeSumDelta(v float64) {
xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}
func (*HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a histogram chunk")
}
func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (Chunk, bool, Appender, error) {
if a.NumSamples() == 0 {
a.appendHistogram(t, h)
if h.CounterResetHint == histogram.GaugeType {
a.setCounterResetHeader(GaugeType)
return nil, false, a, nil
}
switch {
case h.CounterResetHint == histogram.CounterReset:
// Always honor the explicit counter reset hint.
a.setCounterResetHeader(CounterReset)
case prev != nil:
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
_, _, _, _, _, counterReset := prev.appendable(h)
a.setCounterResetHeader(counterReset)
}
return nil, false, a, nil
}
// Adding counter-like histogram.
if h.CounterResetHint != histogram.GaugeType {
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterResetHint := a.appendable(h)
if !okToAppend || counterResetHint != NotCounterReset {
if appendOnly {
if counterResetHint == CounterReset {
return nil, false, a, errors.New("histogram counter reset")
}
return nil, false, a, errors.New("histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty histogram chunk.
}
happ := app.(*HistogramAppender)
happ.setCounterResetHeader(counterResetHint)
happ.appendHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
// However, we need to make a copy in case the input shares spans with an iterator.
h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
copy(h.PositiveSpans, a.pSpans)
h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
copy(h.NegativeSpans, a.nSpans)
} else {
// Spans need pre-adjusting to accommodate the new buckets.
h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
}
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*HistogramAppender).appendHistogram(t, h)
return chk, true, app, nil
}
a.appendHistogram(t, h)
return nil, false, a, nil
}
// Adding gauge histogram.
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
return nil, false, a, errors.New("gauge histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty histogram chunk.
}
happ := app.(*HistogramAppender)
happ.setCounterResetHeader(GaugeType)
happ.appendHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram layout change with %d positive and %d negative backwards inserts", len(pBackwardInserts), len(nBackwardInserts))
}
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*HistogramAppender).appendHistogram(t, h)
return chk, true, app, nil
}
a.appendHistogram(t, h)
return nil, false, a, nil
}
func CounterResetHintToHeader(hint histogram.CounterResetHint) CounterResetHeader {
switch hint {
case histogram.CounterReset:
return CounterReset
case histogram.NotCounterReset:
return NotCounterReset
case histogram.GaugeType:
return GaugeType
default:
return UnknownCounterReset
}
}
type histogramIterator struct {
br bstreamReader
numTotal uint16
numRead uint16
counterResetHeader CounterResetHeader
// Layout:
schema int32
zThreshold float64
pSpans, nSpans []histogram.Span
customValues []float64
// For the fields that are tracked as deltas and ultimately dod's.
t int64
cnt, zCnt uint64
tDelta, cntDelta, zCntDelta int64
pBuckets, nBuckets []int64 // Delta between buckets.
pFloatBuckets, nFloatBuckets []float64 // Absolute counts.
pBucketsDelta, nBucketsDelta []int64
// The sum is Gorilla xor encoded.
sum float64
leading uint8
trailing uint8
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/chunkenc/xor.go | tsdb/chunkenc/xor.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmapped
// read-only byte slices.
// Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package chunkenc
import (
"encoding/binary"
"math"
"math/bits"
"github.com/prometheus/prometheus/model/histogram"
)
const (
chunkHeaderSize = 2
chunkAllocationSize = 128
chunkCompactCapacityThreshold = 32
)
// XORChunk holds XOR encoded sample data.
type XORChunk struct {
b bstream
}
// NewXORChunk returns a new chunk with XOR encoding.
func NewXORChunk() *XORChunk {
b := make([]byte, chunkHeaderSize, chunkAllocationSize)
return &XORChunk{b: bstream{stream: b, count: 0}}
}
func (c *XORChunk) Reset(stream []byte) {
c.b.Reset(stream)
}
// Encoding returns the encoding type.
func (*XORChunk) Encoding() Encoding {
return EncXOR
}
// Bytes returns the underlying byte slice of the chunk.
func (c *XORChunk) Bytes() []byte {
return c.b.bytes()
}
// NumSamples returns the number of samples in the chunk.
func (c *XORChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
// Compact implements the Chunk interface.
func (c *XORChunk) Compact() {
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
buf := make([]byte, l)
copy(buf, c.b.stream)
c.b.stream = buf
}
}
// Appender implements the Chunk interface.
// It is not valid to call Appender() multiple times concurrently or to use multiple
// Appenders on the same chunk.
func (c *XORChunk) Appender() (Appender, error) {
if len(c.b.stream) == chunkHeaderSize { // Avoid allocating an Iterator when chunk is empty.
return &xorAppender{b: &c.b, t: math.MinInt64, leading: 0xff}, nil
}
it := c.iterator(nil)
// To get an appender we must know the state it would have if we had
// appended all existing data from scratch.
// We iterate through to the end and populate the appender via the iterator's state.
for it.Next() != ValNone {
}
if err := it.Err(); err != nil {
return nil, err
}
a := &xorAppender{
b: &c.b,
t: it.t,
v: it.val,
tDelta: it.tDelta,
leading: it.leading,
trailing: it.trailing,
}
return a, nil
}
func (c *XORChunk) iterator(it Iterator) *xorIterator {
if xorIter, ok := it.(*xorIterator); ok {
xorIter.Reset(c.b.bytes())
return xorIter
}
return &xorIterator{
// The first 2 bytes contain chunk headers.
// We skip that for actual samples.
br: newBReader(c.b.bytes()[chunkHeaderSize:]),
numTotal: binary.BigEndian.Uint16(c.b.bytes()),
t: math.MinInt64,
}
}
// Iterator implements the Chunk interface.
// Iterator() must not be called concurrently with any modifications to the chunk,
// but after it returns you can use an Iterator concurrently with an Appender or
// other Iterators.
func (c *XORChunk) Iterator(it Iterator) Iterator {
return c.iterator(it)
}
type xorAppender struct {
b *bstream
t int64
v float64
tDelta uint64
leading uint8
trailing uint8
}
func (a *xorAppender) Append(t int64, v float64) {
var tDelta uint64
num := binary.BigEndian.Uint16(a.b.bytes())
switch num {
case 0:
buf := make([]byte, binary.MaxVarintLen64)
for _, b := range buf[:binary.PutVarint(buf, t)] {
a.b.writeByte(b)
}
a.b.writeBits(math.Float64bits(v), 64)
case 1:
tDelta = uint64(t - a.t)
buf := make([]byte, binary.MaxVarintLen64)
for _, b := range buf[:binary.PutUvarint(buf, tDelta)] {
a.b.writeByte(b)
}
a.writeVDelta(v)
default:
tDelta = uint64(t - a.t)
dod := int64(tDelta - a.tDelta)
// Gorilla has a max resolution of seconds, Prometheus milliseconds.
// Thus we use higher value range steps with larger bit size.
//
// TODO(beorn7): This seems to needlessly jump to large bit
// sizes even for very small deviations from zero. Timestamp
// compression can probably benefit from some smaller bit
// buckets. See also what was done for histogram encoding in
// varbit.go.
switch {
case dod == 0:
a.b.writeBit(zero)
case bitRange(dod, 14):
a.b.writeByte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1))) // 0b10 size code combined with 6 bits of dod.
a.b.writeByte(uint8(dod)) // Bottom 8 bits of dod.
case bitRange(dod, 17):
a.b.writeBits(0b110, 3)
a.b.writeBits(uint64(dod), 17)
case bitRange(dod, 20):
a.b.writeBits(0b1110, 4)
a.b.writeBits(uint64(dod), 20)
default:
a.b.writeBits(0b1111, 4)
a.b.writeBits(uint64(dod), 64)
}
a.writeVDelta(v)
}
a.t = t
a.v = v
binary.BigEndian.PutUint16(a.b.bytes(), num+1)
a.tDelta = tDelta
}
// bitRange returns whether the given integer can be represented by nbits.
// See docs/bstream.md.
func bitRange(x int64, nbits uint8) bool {
return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1)
}
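// For example (editor's note, not upstream):
//
//	bitRange(0, 14)      // true, but dod == 0 takes the 1-bit case above.
//	bitRange(5000, 14)   // true: 2-bit size code 0b10 plus 14 payload bits.
//	bitRange(100000, 17) // false (100000 > 1<<16); the 20-bit bucket is used.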
func (a *xorAppender) writeVDelta(v float64) {
xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
}
func (*xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float chunk")
}
func (*xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a float chunk")
}
type xorIterator struct {
br bstreamReader
numTotal uint16
numRead uint16
t int64
val float64
leading uint8
trailing uint8
tDelta uint64
err error
}
func (it *xorIterator) Seek(t int64) ValueType {
if it.err != nil {
return ValNone
}
for t > it.t || it.numRead == 0 {
if it.Next() == ValNone {
return ValNone
}
}
return ValFloat
}
func (it *xorIterator) At() (int64, float64) {
return it.t, it.val
}
func (*xorIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
panic("cannot call xorIterator.AtHistogram")
}
func (*xorIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
panic("cannot call xorIterator.AtFloatHistogram")
}
func (it *xorIterator) AtT() int64 {
return it.t
}
func (it *xorIterator) Err() error {
return it.err
}
func (it *xorIterator) Reset(b []byte) {
// The first 2 bytes contain chunk headers.
// We skip that for actual samples.
it.br = newBReader(b[chunkHeaderSize:])
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
it.t = 0
it.val = 0
it.leading = 0
it.trailing = 0
it.tDelta = 0
it.err = nil
}
func (it *xorIterator) Next() ValueType {
if it.err != nil || it.numRead == it.numTotal {
return ValNone
}
if it.numRead == 0 {
t, err := binary.ReadVarint(&it.br)
if err != nil {
it.err = err
return ValNone
}
v, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
it.t = t
it.val = math.Float64frombits(v)
it.numRead++
return ValFloat
}
if it.numRead == 1 {
tDelta, err := binary.ReadUvarint(&it.br)
if err != nil {
it.err = err
return ValNone
}
it.tDelta = tDelta
it.t += int64(it.tDelta)
return it.readValue()
}
var d byte
// read delta-of-delta
for range 4 {
d <<= 1
bit, err := it.br.readBitFast()
if err != nil {
bit, err = it.br.readBit()
}
if err != nil {
it.err = err
return ValNone
}
if bit == zero {
break
}
d |= 1
}
var sz uint8
var dod int64
switch d {
case 0b0:
// dod == 0
case 0b10:
sz = 14
case 0b110:
sz = 17
case 0b1110:
sz = 20
case 0b1111:
// Do not use the fast path because it's very unlikely to succeed.
bits, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
dod = int64(bits)
}
if sz != 0 {
bits, err := it.br.readBitsFast(sz)
if err != nil {
bits, err = it.br.readBits(sz)
}
if err != nil {
it.err = err
return ValNone
}
// Account for negative numbers, which come back as high unsigned numbers.
// See docs/bstream.md.
if bits > (1 << (sz - 1)) {
bits -= 1 << sz
}
dod = int64(bits)
}
it.tDelta = uint64(int64(it.tDelta) + dod)
it.t += int64(it.tDelta)
return it.readValue()
}
func (it *xorIterator) readValue() ValueType {
err := xorRead(&it.br, &it.val, &it.leading, &it.trailing)
if err != nil {
it.err = err
return ValNone
}
it.numRead++
return ValFloat
}
func xorWrite(b *bstream, newValue, currentValue float64, leading, trailing *uint8) {
delta := math.Float64bits(newValue) ^ math.Float64bits(currentValue)
if delta == 0 {
b.writeBit(zero)
return
}
b.writeBit(one)
newLeading := uint8(bits.LeadingZeros64(delta))
newTrailing := uint8(bits.TrailingZeros64(delta))
// Clamp number of leading zeros to avoid overflow when encoding.
if newLeading >= 32 {
newLeading = 31
}
if *leading != 0xff && newLeading >= *leading && newTrailing >= *trailing {
// In this case, we stick with the current leading/trailing.
b.writeBit(zero)
b.writeBits(delta>>*trailing, 64-int(*leading)-int(*trailing))
return
}
// Update leading/trailing for the caller.
*leading, *trailing = newLeading, newTrailing
b.writeBit(one)
b.writeBits(uint64(newLeading), 5)
// Note that if newLeading == newTrailing == 0, then sigbits == 64. But
// that value doesn't actually fit into the 6 bits we have. Luckily, we
// never need to encode 0 significant bits, since that would put us in
// the other case (delta == 0). So instead we write out a 0 and adjust
// it back to 64 on unpacking.
sigbits := 64 - newLeading - newTrailing
b.writeBits(uint64(sigbits), 6)
b.writeBits(delta>>newTrailing, int(sigbits))
}
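// Worked example (editor's note, not upstream): math.Float64bits(2.0) is
// 0x4000000000000000 and math.Float64bits(3.0) is 0x4008000000000000, so
// their XOR is 0x0008000000000000 with 12 leading and 51 trailing zero
// bits, i.e. a single significant bit. xorWrite then emits 2 control bits
// plus 5 (leading) + 6 (sigbits) + 1 (payload) bits instead of 64.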
func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error {
bit, err := br.readBitFast()
if err != nil {
bit, err = br.readBit()
}
if err != nil {
return err
}
if bit == zero {
return nil
}
bit, err = br.readBitFast()
if err != nil {
bit, err = br.readBit()
}
if err != nil {
return err
}
var (
bits uint64
newLeading, newTrailing, mbits uint8
)
if bit == zero {
// Reuse leading/trailing zero bits.
newLeading, newTrailing = *leading, *trailing
mbits = 64 - newLeading - newTrailing
} else {
bits, err = br.readBitsFast(5)
if err != nil {
bits, err = br.readBits(5)
}
if err != nil {
return err
}
newLeading = uint8(bits)
bits, err = br.readBitsFast(6)
if err != nil {
bits, err = br.readBits(6)
}
if err != nil {
return err
}
mbits = uint8(bits)
// 0 significant bits here means we overflowed and we actually
// need 64; see comment in xorWrite.
if mbits == 0 {
mbits = 64
}
newTrailing = 64 - newLeading - mbits
// Update leading/trailing zero bits for the caller.
*leading, *trailing = newLeading, newTrailing
}
bits, err = br.readBitsFast(mbits)
if err != nil {
bits, err = br.readBits(mbits)
}
if err != nil {
return err
}
vbits := math.Float64bits(*value)
vbits ^= bits << newTrailing
*value = math.Float64frombits(vbits)
return nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/wlog.go | tsdb/wlog/wlog.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"log/slog"
"os"
"path/filepath"
"slices"
"strconv"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/util/compression"
)
const (
DefaultSegmentSize = 128 * 1024 * 1024 // DefaultSegmentSize is 128 MB.
pageSize = 32 * 1024 // pageSize is 32KB.
recordHeaderSize = 7
WblDirName = "wbl"
)
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// up front.
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// page is an in-memory buffer used to batch disk writes.
// Records bigger than the page size are split and flushed separately.
// A flush is triggered when a single record doesn't fit the page size or
// when the next record can't fit in the remaining free page space.
type page struct {
alloc int
flushed int
buf [pageSize]byte
}
func (p *page) remaining() int {
return pageSize - p.alloc
}
func (p *page) full() bool {
return pageSize-p.alloc < recordHeaderSize
}
func (p *page) reset() {
for i := range p.buf {
p.buf[i] = 0
}
p.alloc = 0
p.flushed = 0
}
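// Sizing note (editor's illustration, not upstream): each record part
// consumes recordHeaderSize (7) bytes plus its payload, so a fresh 32 KiB
// page fits at most pageSize-recordHeaderSize = 32761 payload bytes in a
// single part, and full() reports true once fewer than 7 bytes remain,
// because not even the header of an empty part would fit.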
// SegmentFile represents the underlying file used to store a segment.
type SegmentFile interface {
Stat() (os.FileInfo, error)
Sync() error
io.Writer
io.Reader
io.Closer
}
// Segment represents a segment file.
type Segment struct {
SegmentFile
dir string
i int
}
// Index returns the index of the segment.
func (s *Segment) Index() int {
return s.i
}
// Dir returns the directory of the segment.
func (s *Segment) Dir() string {
return s.dir
}
// CorruptionErr is an error that's returned when corruption is encountered.
type CorruptionErr struct {
Dir string
Segment int
Offset int64
Err error
}
func (e *CorruptionErr) Error() string {
if e.Segment < 0 {
return fmt.Sprintf("corruption after %d bytes: %s", e.Offset, e.Err)
}
return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err)
}
func (e *CorruptionErr) Unwrap() error {
return e.Err
}
// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
func OpenWriteSegment(logger *slog.Logger, dir string, k int) (*Segment, error) {
segName := SegmentName(dir, k)
f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666)
if err != nil {
return nil, err
}
stat, err := f.Stat()
if err != nil {
f.Close()
return nil, err
}
// If the last page is torn, fill it with zeros.
// In case it was torn after all records were written successfully, this
// will just pad the page and everything will be fine.
// If it was torn mid-record, a full read (which the caller should do anyway
// to ensure integrity) will detect it as a corruption by the end.
if d := stat.Size() % pageSize; d != 0 {
logger.Warn("Last page of the wlog is torn, filling it with zeros", "segment", segName)
if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
f.Close()
return nil, fmt.Errorf("zero-pad torn page: %w", err)
}
}
return &Segment{SegmentFile: f, i: k, dir: dir}, nil
}
// CreateSegment creates a new segment k in dir.
func CreateSegment(dir string, k int) (*Segment, error) {
f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o666)
if err != nil {
return nil, err
}
return &Segment{SegmentFile: f, i: k, dir: dir}, nil
}
// OpenReadSegment opens the segment with the given filename.
func OpenReadSegment(fn string) (*Segment, error) {
k, err := strconv.Atoi(filepath.Base(fn))
if err != nil {
return nil, errors.New("not a valid filename")
}
f, err := os.Open(fn)
if err != nil {
return nil, err
}
return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil
}
// WL is a write log that stores records in segment files.
// It must be read from start to end once before logging new data.
// If an error occurs during read, the repair procedure must be called
// before it's safe to do further writes.
//
// Segments are written to in pages of 32KB, with records possibly split
// across page boundaries.
// Records are never split across segments to allow full segments to be
// safely truncated. It also ensures that torn writes never corrupt records
// beyond the most recent segment.
type WL struct {
dir string
logger *slog.Logger
segmentSize int
mtx sync.RWMutex
segment *Segment // Active segment.
donePages int // Pages written to the segment.
page *page // Active page.
stopc chan chan struct{}
actorc chan func()
closed bool // To allow calling Close() more than once without blocking.
compress compression.Type
cEnc compression.EncodeBuffer
WriteNotified WriteNotified
metrics *wlMetrics
}
type wlMetrics struct {
fsyncDuration prometheus.Summary
pageFlushes prometheus.Counter
pageCompletions prometheus.Counter
truncateFail prometheus.Counter
truncateTotal prometheus.Counter
currentSegment prometheus.Gauge
writesFailed prometheus.Counter
walFileSize prometheus.GaugeFunc
recordPartWrites prometheus.Counter
recordPartBytes prometheus.Counter
recordBytesSaved *prometheus.CounterVec
r prometheus.Registerer
}
func (w *wlMetrics) Unregister() {
if w.r == nil {
return
}
w.r.Unregister(w.fsyncDuration)
w.r.Unregister(w.pageFlushes)
w.r.Unregister(w.pageCompletions)
w.r.Unregister(w.truncateFail)
w.r.Unregister(w.truncateTotal)
w.r.Unregister(w.currentSegment)
w.r.Unregister(w.writesFailed)
w.r.Unregister(w.walFileSize)
w.r.Unregister(w.recordPartWrites)
w.r.Unregister(w.recordPartBytes)
w.r.Unregister(w.recordBytesSaved)
}
func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics {
return &wlMetrics{
r: r,
fsyncDuration: promauto.With(r).NewSummary(prometheus.SummaryOpts{
Name: "fsync_duration_seconds",
Help: "Duration of write log fsync.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
pageFlushes: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "page_flushes_total",
Help: "Total number of page flushes.",
}),
pageCompletions: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "completed_pages_total",
Help: "Total number of completed pages.",
}),
truncateFail: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "truncations_failed_total",
Help: "Total number of write log truncations that failed.",
}),
truncateTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "truncations_total",
Help: "Total number of write log truncations attempted.",
}),
currentSegment: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "segment_current",
Help: "Write log segment index that TSDB is currently writing to.",
}),
writesFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "writes_failed_total",
Help: "Total number of write log writes that failed.",
}),
walFileSize: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{
Name: "storage_size_bytes",
Help: "Size of the write log directory.",
}, func() float64 {
val, err := w.Size()
if err != nil {
w.logger.Error("Failed to calculate size of \"wal\" dir", "err", err.Error())
}
return float64(val)
}),
recordPartWrites: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "record_part_writes_total",
Help: "Total number of record parts written before flushing.",
}),
recordPartBytes: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "record_parts_bytes_written_total",
Help: "Total number of record part bytes written before flushing, including" +
" CRC and compression headers.",
}),
recordBytesSaved: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Name: "record_bytes_saved_total",
Help: "Total number of bytes saved by the optional record compression." +
" Use this metric to learn about the effectiveness compression.",
}, []string{"compression"}),
}
}
// New returns a new WAL over the given directory.
func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress compression.Type) (*WL, error) {
return NewSize(logger, reg, dir, DefaultSegmentSize, compress)
}
// NewSize returns a new write log over the given directory.
// New segments are created with the specified size.
func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress compression.Type) (*WL, error) {
if segmentSize%pageSize != 0 {
return nil, errors.New("invalid segment size")
}
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, fmt.Errorf("create dir: %w", err)
}
if logger == nil {
logger = promslog.NewNopLogger()
}
w := &WL{
dir: dir,
logger: logger,
segmentSize: segmentSize,
page: &page{},
actorc: make(chan func(), 100),
stopc: make(chan chan struct{}),
compress: compress,
cEnc: compression.NewSyncEncodeBuffer(),
}
prefix := "prometheus_tsdb_wal_"
if filepath.Base(dir) == WblDirName {
prefix = "prometheus_tsdb_out_of_order_wbl_"
}
w.metrics = newWLMetrics(w, prometheus.WrapRegistererWithPrefix(prefix, reg))
_, last, err := Segments(w.Dir())
if err != nil {
return nil, fmt.Errorf("get segment range: %w", err)
}
// Index of the Segment we want to open and write to.
writeSegmentIndex := 0
// If some segments already exist, create one with a higher index than the last segment.
if last != -1 {
writeSegmentIndex = last + 1
}
segment, err := CreateSegment(w.Dir(), writeSegmentIndex)
if err != nil {
return nil, err
}
if err := w.setSegment(segment); err != nil {
return nil, err
}
go w.run()
return w, nil
}
// Open an existing WAL.
func Open(logger *slog.Logger, dir string) (*WL, error) {
if logger == nil {
logger = promslog.NewNopLogger()
}
w := &WL{
dir: dir,
logger: logger,
}
return w, nil
}
// CompressionType returns the type of compression used on this WAL.
func (w *WL) CompressionType() compression.Type {
return w.compress
}
// Dir returns the directory of the WAL.
func (w *WL) Dir() string {
return w.dir
}
func (w *WL) SetWriteNotified(wn WriteNotified) {
w.WriteNotified = wn
}
func (w *WL) run() {
Loop:
for {
select {
case f := <-w.actorc:
f()
case donec := <-w.stopc:
close(w.actorc)
defer close(donec)
break Loop
}
}
// Drain and process any remaining functions.
for f := range w.actorc {
f()
}
}
// Repair attempts to repair the WAL based on the error.
// It discards all data after the corruption.
func (w *WL) Repair(origErr error) error {
// We could probably have a mode that only discards torn records right around
// the corruption to preserve as much data as possible.
// But that's not generally applicable if the records have any kind of causality.
// Maybe as an extra mode in the future if mid-WAL corruptions become
// a frequent concern.
var cerr *CorruptionErr
if !errors.As(origErr, &cerr) {
return fmt.Errorf("cannot handle error: %w", origErr)
}
if cerr.Segment < 0 {
return errors.New("corruption error does not specify position")
}
w.logger.Warn("Starting corruption repair",
"segment", cerr.Segment, "offset", cerr.Offset)
// All segments after the corruption can no longer be used.
segs, err := listSegments(w.Dir())
if err != nil {
return fmt.Errorf("list segments: %w", err)
}
w.logger.Warn("Deleting all segments newer than corrupted segment", "segment", cerr.Segment)
for _, s := range segs {
if w.segment.i == s.index {
// The active segment needs to be removed,
// so close it first (Windows!). It can be closed safely
// as we set the current segment to the repaired file
// below.
if err := w.segment.Close(); err != nil {
return fmt.Errorf("close active segment: %w", err)
}
}
if s.index <= cerr.Segment {
continue
}
if err := os.Remove(filepath.Join(w.Dir(), s.name)); err != nil {
return fmt.Errorf("delete segment:%v: %w", s.index, err)
}
}
// Regardless of the corruption offset, no record reaches into the previous segment.
// So we can safely repair the WAL by removing the segment and re-inserting all
// its records up to the corruption.
w.logger.Warn("Rewrite corrupted segment", "segment", cerr.Segment)
fn := SegmentName(w.Dir(), cerr.Segment)
tmpfn := fn + ".repair"
if err := fileutil.Rename(fn, tmpfn); err != nil {
return err
}
// Create a clean segment and make it the active one.
s, err := CreateSegment(w.Dir(), cerr.Segment)
if err != nil {
return err
}
if err := w.setSegment(s); err != nil {
return err
}
f, err := os.Open(tmpfn)
if err != nil {
return fmt.Errorf("open segment: %w", err)
}
defer f.Close()
r := NewReader(bufio.NewReader(f))
for r.Next() {
// Add records only up to where the error was.
if r.Offset() >= cerr.Offset {
break
}
if err := w.Log(r.Record()); err != nil {
return fmt.Errorf("insert record: %w", err)
}
}
// We expect an error here from r.Err(), so there is nothing to handle.
// We need to pad to the end of the last page in the repaired segment.
if err := w.flushPage(true); err != nil {
return fmt.Errorf("flush page in repair: %w", err)
}
// We explicitly close even when there is a defer for Windows to be
// able to delete it. The defer is in place to close it in-case there
// are errors above.
if err := f.Close(); err != nil {
return fmt.Errorf("close corrupted file: %w", err)
}
if err := os.Remove(tmpfn); err != nil {
return fmt.Errorf("delete corrupted segment: %w", err)
}
// Explicitly close the segment we just repaired to avoid issues with Windows.
s.Close()
// We always want to start writing to a new Segment rather than an existing
// Segment, which is handled by NewSize, but earlier in Repair we're deleting
// all segments that come after the corrupted Segment. Recreate a new Segment here.
s, err = CreateSegment(w.Dir(), cerr.Segment+1)
if err != nil {
return err
}
return w.setSegment(s)
}
// SegmentName builds a segment name for the directory.
func SegmentName(dir string, i int) string {
return filepath.Join(dir, fmt.Sprintf("%08d", i))
}
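// For example (editor's note, not upstream), on a Unix-like system:
//
//	SegmentName("data/wal", 12) // -> "data/wal/00000012"
//
// The fixed-width, zero-padded name keeps plain directory listings in
// numeric order.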
// NextSegment creates the next segment and closes the previous one asynchronously.
// It returns the file number of the new file.
func (w *WL) NextSegment() (int, error) {
w.mtx.Lock()
defer w.mtx.Unlock()
return w.nextSegment(true)
}
// NextSegmentSync creates the next segment and closes the previous one in sync.
// It returns the file number of the new file.
func (w *WL) NextSegmentSync() (int, error) {
w.mtx.Lock()
defer w.mtx.Unlock()
return w.nextSegment(false)
}
// nextSegment creates the next segment and closes the previous one.
// It returns the file number of the new file.
func (w *WL) nextSegment(async bool) (int, error) {
if w.closed {
return 0, errors.New("wlog is closed")
}
// Only flush the current page if it actually holds data.
if w.page.alloc > 0 {
if err := w.flushPage(true); err != nil {
return 0, err
}
}
next, err := CreateSegment(w.Dir(), w.segment.Index()+1)
if err != nil {
return 0, fmt.Errorf("create new segment file: %w", err)
}
prev := w.segment
if err := w.setSegment(next); err != nil {
return 0, err
}
// Don't block further writes by fsyncing the last segment.
f := func() {
if err := w.fsync(prev); err != nil {
w.logger.Error("sync previous segment", "err", err)
}
if err := prev.Close(); err != nil {
w.logger.Error("close previous segment", "err", err)
}
}
if async {
w.actorc <- f
} else {
f()
}
return next.Index(), nil
}
func (w *WL) setSegment(segment *Segment) error {
w.segment = segment
// Correctly initialize donePages.
stat, err := segment.Stat()
if err != nil {
return err
}
w.donePages = int(stat.Size() / pageSize)
w.metrics.currentSegment.Set(float64(segment.Index()))
return nil
}
// flushPage writes the new contents of the page to disk. If no more records will fit into
// the page, the remaining bytes will be set to zero and a new page will be started.
// If forceClear is true, this is enforced regardless of how many bytes are left in the page.
func (w *WL) flushPage(forceClear bool) error {
w.metrics.pageFlushes.Inc()
p := w.page
shouldClear := forceClear || p.full()
// Either no more data will fit into the page, or a clear was forced.
// Write it out fully and reset it.
if shouldClear {
p.alloc = pageSize // Write till end of page.
}
n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
if err != nil {
p.flushed += n
return err
}
p.flushed += n
// We flushed an entire page, prepare a new one.
if shouldClear {
p.reset()
w.donePages++
w.metrics.pageCompletions.Inc()
}
return nil
}
// First byte of the header format:
//
// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bits record type]
const (
snappyMask = 1 << 3
zstdMask = 1 << 4
recTypeMask = snappyMask - 1
)
type recType uint8
const (
recPageTerm recType = 0 // Rest of page is empty.
recFull recType = 1 // Full record.
recFirst recType = 2 // First fragment of a record.
recMiddle recType = 3 // Middle fragments of a record.
recLast recType = 4 // Final fragment of a record.
)
func recTypeFromHeader(header byte) recType {
return recType(header & recTypeMask)
}
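// Example (editor's illustration, not upstream): a snappy-compressed full
// record carries the header byte 0b00001001:
//
//	hdr := byte(recFull) | snappyMask // 0x09
//	_ = recTypeFromHeader(hdr)        // recFull: the low 3 bits survive the mask.
//	_ = hdr&snappyMask == snappyMask  // true: the compression flag is set.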
func (t recType) String() string {
switch t {
case recPageTerm:
return "zero"
case recFull:
return "full"
case recFirst:
return "first"
case recMiddle:
return "middle"
case recLast:
return "last"
default:
return "<invalid>"
}
}
func (w *WL) pagesPerSegment() int {
return w.segmentSize / pageSize
}
// Log writes the records into the log.
// Multiple records can be passed at once to reduce writes and increase throughput.
func (w *WL) Log(recs ...[]byte) error {
w.mtx.Lock()
defer w.mtx.Unlock()
// Callers could just implement their own list record format but adding
// a bit of extra logic here frees them from that overhead.
for i, r := range recs {
if err := w.log(r, i == len(recs)-1); err != nil {
w.metrics.writesFailed.Inc()
return err
}
}
return nil
}
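// Usage sketch (editor's illustration, not upstream; error handling elided
// and a nil logger/registerer used, as the tests in this package do):
//
//	w, err := New(nil, nil, "data/wal", compression.None)
//	if err != nil {
//		// handle error
//	}
//	defer w.Close()
//	// Batching records into one Log call amortizes the final page flush.
//	err = w.Log([]byte("record-1"), []byte("record-2"))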
// log writes rec to the log and forces a flush of the current page if:
// - it is the final record of a batch,
// - the record is bigger than the page size, or
// - the current page is full.
func (w *WL) log(rec []byte, final bool) error {
// If the last page flush failed, the page will remain full.
// When the page is full, we need to flush it before trying to add more records to it.
if w.page.full() {
if err := w.flushPage(true); err != nil {
return err
}
}
// Compress the record before calculating if a new segment is needed.
finalCompression := w.compress
enc, err := compression.Encode(w.compress, rec, w.cEnc)
if err != nil {
return err
}
if w.compress != compression.None {
savedBytes := len(rec) - len(enc)
// Even if compression was applied, skip it if there's no benefit
// in the WAL record size (we have the choice). For small records, e.g.
// snappy compression can yield larger output than the uncompressed input.
if savedBytes <= 0 {
enc = rec
finalCompression = compression.None
savedBytes = 0
}
w.metrics.recordBytesSaved.WithLabelValues(w.compress).Add(float64(savedBytes))
}
// If the record is too big to fit within the active page in the current
// segment, terminate the active segment and advance to the next one.
// This ensures that records do not cross segment boundaries.
left := w.page.remaining() - recordHeaderSize // Free space in the active page.
left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
if len(enc) > left {
if _, err := w.nextSegment(true); err != nil {
return err
}
}
// Populate as many pages as necessary to fit the record.
// Be careful to always do one pass to ensure we write zero-length records.
for i := 0; i == 0 || len(enc) > 0; i++ {
p := w.page
// Find how much of the record we can fit into the page.
var (
l = min(len(enc), (pageSize-p.alloc)-recordHeaderSize)
part = enc[:l]
buf = p.buf[p.alloc:]
typ recType
)
switch {
case i == 0 && len(part) == len(enc):
typ = recFull
case len(part) == len(enc):
typ = recLast
case i == 0:
typ = recFirst
default:
typ = recMiddle
}
if finalCompression != compression.None {
switch finalCompression {
case compression.Snappy:
typ |= snappyMask
case compression.Zstd:
typ |= zstdMask
default:
return fmt.Errorf("unsupported compression type: %v", finalCompression)
}
}
buf[0] = byte(typ)
crc := crc32.Checksum(part, castagnoliTable)
binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
binary.BigEndian.PutUint32(buf[3:], crc)
copy(buf[recordHeaderSize:], part)
p.alloc += len(part) + recordHeaderSize
w.metrics.recordPartWrites.Inc()
w.metrics.recordPartBytes.Add(float64(len(part) + recordHeaderSize))
if w.page.full() {
if err := w.flushPage(true); err != nil {
// TODO: When the flush fails at this point and the record has not been
// fully written to the buffer, we end up with a corrupted WAL because part of the
// record has been written to the buffer, while the rest of it will be discarded.
return err
}
}
enc = enc[l:]
}
// If it's the final record of the batch and the page is not empty, flush it.
if final && w.page.alloc > 0 {
if err := w.flushPage(false); err != nil {
return err
}
}
return nil
}
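// Capacity example (editor's note, not upstream): with DefaultSegmentSize
// (128 MiB) and pageSize (32 KiB) a segment holds 4096 pages. On a fresh
// segment with an empty page, the check above therefore allows records up
// to 32761 + 32761*4095 = 134189056 encoded bytes before advancing to the
// next segment.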
// LastSegmentAndOffset returns the last segment number of the WAL
// and the offset in that file up to which the segment has been filled.
func (w *WL) LastSegmentAndOffset() (seg, offset int, err error) {
w.mtx.Lock()
defer w.mtx.Unlock()
_, seg, err = Segments(w.Dir())
if err != nil {
return seg, offset, err
}
offset = (w.donePages * pageSize) + w.page.alloc
return seg, offset, err
}
// Truncate drops all segments before i.
func (w *WL) Truncate(i int) (err error) {
w.metrics.truncateTotal.Inc()
defer func() {
if err != nil {
w.metrics.truncateFail.Inc()
}
}()
refs, err := listSegments(w.Dir())
if err != nil {
return err
}
for _, r := range refs {
if r.index >= i {
break
}
if err = os.Remove(filepath.Join(w.Dir(), r.name)); err != nil {
return err
}
}
return nil
}
func (w *WL) fsync(f *Segment) error {
start := time.Now()
err := f.Sync()
w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
return err
}
// Sync forces a file sync on the current write log segment. This function is meant
// to be used only in tests due to differing behaviour on operating systems
// like Windows and Linux.
func (w *WL) Sync() error {
return w.fsync(w.segment)
}
// Close flushes all writes and closes active segment.
func (w *WL) Close() (err error) {
w.mtx.Lock()
defer w.mtx.Unlock()
if w.closed {
return errors.New("wlog already closed")
}
if w.segment == nil {
w.closed = true
return nil
}
// Flush the last page and zero out all its remaining size.
// We must not flush an empty page as it would falsely signal
// the segment is done if we start writing to it again after opening.
if w.page.alloc > 0 {
if err := w.flushPage(true); err != nil {
return err
}
}
donec := make(chan struct{})
w.stopc <- donec
<-donec
if err = w.fsync(w.segment); err != nil {
w.logger.Error("sync previous segment", "err", err)
}
if err := w.segment.Close(); err != nil {
w.logger.Error("close previous segment", "err", err)
}
w.metrics.Unregister()
w.closed = true
return nil
}
// Segments returns the range [first, last] of currently existing segments.
// If no segments are found, first and last are -1.
func Segments(wlDir string) (first, last int, err error) {
refs, err := listSegments(wlDir)
if err != nil {
return 0, 0, err
}
if len(refs) == 0 {
return -1, -1, nil
}
return refs[0].index, refs[len(refs)-1].index, nil
}
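// For example (editor's illustration, not upstream): with segment files
// 00000003 through 00000007 on disk,
//
//	first, last, err := Segments("data/wal") // first == 3, last == 7
//
// while an empty directory yields first == last == -1 with a nil error.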
type segmentRef struct {
name string
index int
}
func listSegments(dir string) (refs []segmentRef, err error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
for _, f := range files {
fn := f.Name()
k, err := strconv.Atoi(fn)
if err != nil {
continue
}
refs = append(refs, segmentRef{name: fn, index: k})
}
slices.SortFunc(refs, func(a, b segmentRef) int {
return a.index - b.index
})
for i := 0; i < len(refs)-1; i++ {
if refs[i].index+1 != refs[i+1].index {
return nil, errors.New("segments are not sequential")
}
}
return refs, nil
}
// SegmentRange groups segments by the directory and the first and last index it includes.
type SegmentRange struct {
Dir string
First, Last int
}
// NewSegmentsReader returns a new reader over all segments in the directory.
func NewSegmentsReader(dir string) (io.ReadCloser, error) {
return NewSegmentsRangeReader(SegmentRange{dir, -1, -1})
}
// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges.
// If first or last are -1, the range is open on the respective end.
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
var segs []*Segment
for _, sgmRange := range sr {
refs, err := listSegments(sgmRange.Dir)
if err != nil {
return nil, fmt.Errorf("list segment in dir:%v: %w", sgmRange.Dir, err)
}
for _, r := range refs {
if sgmRange.First >= 0 && r.index < sgmRange.First {
continue
}
if sgmRange.Last >= 0 && r.index > sgmRange.Last {
break
}
s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
if err != nil {
return nil, fmt.Errorf("open segment:%v in dir:%v: %w", r.name, sgmRange.Dir, err)
}
segs = append(segs, s)
}
}
return NewSegmentBufReader(segs...), nil
}
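// For example (editor's sketch, not upstream), to read only segments 10
// through 20 of one directory (-1 would leave the respective end open):
//
//	rc, err := NewSegmentsRangeReader(SegmentRange{Dir: "data/wal", First: 10, Last: 20})
//	if err != nil {
//		// handle error
//	}
//	defer rc.Close()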
// segmentBufReader is a buffered reader that reads in multiples of pages.
// The main purpose is that we are able to track segment and offset for
// corruption reporting. We have to be careful not to increment cur too
// early, as it is used by Reader.Err() to tell Repair which segment is corrupt.
// As such we pad the end of non-page-aligned segments with zeros.
type segmentBufReader struct {
buf *bufio.Reader
segs []*Segment
cur int // Index into segs.
off int // Offset of read data into current segment.
}
func NewSegmentBufReader(segs ...*Segment) io.ReadCloser {
if len(segs) == 0 {
return &segmentBufReader{}
}
return &segmentBufReader{
buf: bufio.NewReaderSize(segs[0], 16*pageSize),
segs: segs,
}
}
func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (io.ReadCloser, error) {
if offset == 0 || len(segs) == 0 {
return NewSegmentBufReader(segs...), nil
}
sbr := &segmentBufReader{
buf: bufio.NewReaderSize(segs[0], 16*pageSize),
segs: segs,
}
var err error
if offset > 0 {
_, err = sbr.buf.Discard(offset)
}
return sbr, err
}
func (r *segmentBufReader) Close() (err error) {
for _, s := range r.segs {
if e := s.Close(); e != nil {
err = e
}
}
return err
}
// Read implements io.Reader.
func (r *segmentBufReader) Read(b []byte) (n int, err error) {
if len(r.segs) == 0 {
return 0, io.EOF
}
n, err = r.buf.Read(b)
r.off += n
// If we succeeded, or hit a non-EOF, we can stop.
if err == nil || !errors.Is(err, io.EOF) {
return n, err
}
// We hit EOF; fake out zero padding at the end of short segments, so we
// don't increment cur too early and report the wrong segment as corrupt.
if r.off%pageSize != 0 {
i := 0
for ; n+i < len(b) && (r.off+i)%pageSize != 0; i++ {
b[n+i] = 0
}
// Return early, even if we didn't fill b.
r.off += i
return n + i, nil
}
// There is no more data left in the current segment and there are no more
// segments left. Return EOF.
if r.cur+1 >= len(r.segs) {
return n, io.EOF
}
// Move to next segment.
r.cur++
r.off = 0
r.buf.Reset(r.segs[r.cur])
return n, nil
}
// Size computes the size of the write log.
// We do this by adding the sizes of all the files under the WAL dir.
func (w *WL) Size() (int64, error) {
return fileutil.DirSize(w.Dir())
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/checkpoint_test.go | tsdb/wlog/checkpoint_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
)
func TestLastCheckpoint(t *testing.T) {
dir := t.TempDir()
_, _, err := LastCheckpoint(dir)
require.Equal(t, record.ErrNotFound, err)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0o777))
s, k, err := LastCheckpoint(dir)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
require.Equal(t, 0, k)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0o777))
s, k, err = LastCheckpoint(dir)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
require.Equal(t, 0, k)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0o777))
s, k, err = LastCheckpoint(dir)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.1"), s)
require.Equal(t, 1, k)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0o777))
s, k, err = LastCheckpoint(dir)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s)
require.Equal(t, 1000, k)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777))
s, k, err = LastCheckpoint(dir)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s)
require.Equal(t, 99999999, k)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777))
s, k, err = LastCheckpoint(dir)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s)
require.Equal(t, 100000000, k)
}
func TestDeleteCheckpoints(t *testing.T) {
dir := t.TempDir()
require.NoError(t, DeleteCheckpoints(dir, 0))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0o777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0o777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0o777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0o777))
require.NoError(t, DeleteCheckpoints(dir, 2))
files, err := os.ReadDir(dir)
require.NoError(t, err)
fns := []string{}
for _, f := range files {
fns = append(fns, f.Name())
}
require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns)
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0o777))
require.NoError(t, DeleteCheckpoints(dir, 100000000))
files, err = os.ReadDir(dir)
require.NoError(t, err)
fns = []string{}
for _, f := range files {
fns = append(fns, f.Name())
}
require.Equal(t, []string{"checkpoint.100000000", "checkpoint.100000001"}, fns)
}
func TestCheckpoint(t *testing.T) {
t.Parallel()
makeHistogram := func(i int) *histogram.Histogram {
return &histogram.Histogram{
Count: 5 + uint64(i*4),
ZeroCount: 2 + uint64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
}
}
makeCustomBucketHistogram := func(i int) *histogram.Histogram {
return &histogram.Histogram{
Count: 5 + uint64(i*4),
ZeroCount: 2 + uint64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
CustomValues: []float64{0, 1, 2, 3, 4},
}
}
makeFloatHistogram := func(i int) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 5 + float64(i*4),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), 1, -1, 0},
}
}
makeCustomBucketFloatHistogram := func(i int) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 5 + float64(i*4),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
CustomValues: []float64{0, 1, 2, 3, 4},
}
}
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
var enc record.Encoder
// Create a dummy segment to bump the initial number.
seg, err := CreateSegment(dir, 100)
require.NoError(t, err)
require.NoError(t, seg.Close())
// Manually create checkpoint for 99 and earlier.
w, err := New(nil, nil, filepath.Join(dir, "checkpoint.0099"), compress)
require.NoError(t, err)
// Add some data we expect to be around later.
err = w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 1, Labels: labels.FromStrings("a", "b", "c", "1")},
}, nil))
require.NoError(t, err)
// Log an unknown record that might have come from a future Prometheus version.
require.NoError(t, w.Log([]byte{255}))
require.NoError(t, w.Close())
// Start a WAL and write records to it as usual.
w, err = NewSize(nil, nil, dir, 128*1024, compress)
require.NoError(t, err)
samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0
var last int64
for i := 0; ; i++ {
_, n, err := Segments(w.Dir())
require.NoError(t, err)
if n >= 106 {
break
}
// Write some series initially.
if i == 0 {
b := enc.Series([]record.RefSeries{
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 3, Labels: labels.FromStrings("a", "b", "c", "3")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
}, nil)
require.NoError(t, w.Log(b))
b = enc.Metadata([]record.RefMetadata{
{Ref: 2, Unit: "unit", Help: "help"},
{Ref: 3, Unit: "unit", Help: "help"},
{Ref: 4, Unit: "unit", Help: "help"},
{Ref: 5, Unit: "unit", Help: "help"},
}, nil)
require.NoError(t, w.Log(b))
}
// Write samples until the WAL has enough segments.
// Make them have drifting timestamps within a record to see that they
// get filtered properly.
b := enc.Samples([]record.RefSample{
{Ref: 0, T: last, V: float64(i)},
{Ref: 1, T: last + 10000, V: float64(i)},
{Ref: 2, T: last + 20000, V: float64(i)},
{Ref: 3, T: last + 30000, V: float64(i)},
}, nil)
require.NoError(t, w.Log(b))
samplesInWAL += 4
h := makeHistogram(i)
b, _ = enc.HistogramSamples([]record.RefHistogramSample{
{Ref: 0, T: last, H: h},
{Ref: 1, T: last + 10000, H: h},
{Ref: 2, T: last + 20000, H: h},
{Ref: 3, T: last + 30000, H: h},
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
cbh := makeCustomBucketHistogram(i)
b = enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{
{Ref: 0, T: last, H: cbh},
{Ref: 1, T: last + 10000, H: cbh},
{Ref: 2, T: last + 20000, H: cbh},
{Ref: 3, T: last + 30000, H: cbh},
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
fh := makeFloatHistogram(i)
b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: fh},
{Ref: 1, T: last + 10000, FH: fh},
{Ref: 2, T: last + 20000, FH: fh},
{Ref: 3, T: last + 30000, FH: fh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
cbfh := makeCustomBucketFloatHistogram(i)
b = enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: cbfh},
{Ref: 1, T: last + 10000, FH: cbfh},
{Ref: 2, T: last + 20000, FH: cbfh},
{Ref: 3, T: last + 30000, FH: cbfh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
b = enc.Exemplars([]record.RefExemplar{
{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i))},
}, nil)
require.NoError(t, w.Log(b))
// Write changing metadata for each series. In the end, only the latest
// version should end up in the checkpoint.
b = enc.Metadata([]record.RefMetadata{
{Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
}, nil)
require.NoError(t, w.Log(b))
last += 100
}
require.NoError(t, w.Close())
stats, err := Checkpoint(promslog.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
return x%2 == 0
}, last/2)
require.NoError(t, err)
require.NoError(t, w.Truncate(107))
require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
require.Positive(t, stats.DroppedSamples)
// Only the new checkpoint should be left.
files, err := os.ReadDir(dir)
require.NoError(t, err)
require.Len(t, files, 1)
require.Equal(t, "checkpoint.00000106", files[0].Name())
sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))
require.NoError(t, err)
defer sr.Close()
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
var series []record.RefSeries
var metadata []record.RefMetadata
r := NewReader(sr)
samplesInCheckpoint, histogramsInCheckpoint, floatHistogramsInCheckpoint := 0, 0, 0
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
series, err = dec.Series(rec, series)
require.NoError(t, err)
case record.Samples:
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
for _, s := range samples {
require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
}
samplesInCheckpoint += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
histograms, err := dec.HistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range histograms {
require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
}
histogramsInCheckpoint += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
floatHistograms, err := dec.FloatHistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range floatHistograms {
require.GreaterOrEqual(t, h.T, last/2, "float histogram with wrong timestamp")
}
floatHistogramsInCheckpoint += len(floatHistograms)
case record.Exemplars:
exemplars, err := dec.Exemplars(rec, nil)
require.NoError(t, err)
for _, e := range exemplars {
require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
}
case record.Metadata:
metadata, err = dec.Metadata(rec, metadata)
require.NoError(t, err)
}
}
require.NoError(t, r.Err())
// Making sure we replayed some samples. We expect >50% of the samples to still be present.
require.Greater(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.5)
require.Less(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.8)
require.Greater(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.5)
require.Less(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.8)
require.Greater(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.5)
require.Less(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.8)
expectedRefSeries := []record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
}
testutil.RequireEqual(t, expectedRefSeries, series)
expectedRefMetadata := []record.RefMetadata{
{Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 4, Unit: "unit", Help: "help"},
}
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
require.Equal(t, expectedRefMetadata, metadata)
})
}
}
func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
// Create a new wlog with invalid data.
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, 64*1024, compression.None)
require.NoError(t, err)
var enc record.Encoder
require.NoError(t, w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")},
}, nil)))
require.NoError(t, w.Close())
// Corrupt data.
f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666)
require.NoError(t, err)
_, err = f.WriteAt([]byte{42}, 1)
require.NoError(t, err)
require.NoError(t, f.Close())
// Run the checkpoint; since the wlog contains corrupt data this should return an error.
_, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1, nil, 0)
require.Error(t, err)
// Walk the wlog dir to make sure there are no tmp folders left behind after the error.
err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("access err %q: %w", path, err)
}
if info.IsDir() && strings.HasSuffix(info.Name(), ".tmp") {
return fmt.Errorf("wlog dir contains temporary folder:%s", info.Name())
}
return nil
})
require.NoError(t, err)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/reader.go | tsdb/wlog/reader.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"github.com/prometheus/prometheus/util/compression"
)
// Reader reads WAL records from an io.Reader.
type Reader struct {
rdr io.Reader
err error
rec []byte
precomprBuf []byte
decBuf compression.DecodeBuffer
buf [pageSize]byte
total int64 // Total bytes processed.
curRecTyp recType // Used for checking that the last record is not torn.
}
// NewReader returns a new reader.
func NewReader(r io.Reader) *Reader {
return &Reader{rdr: r, decBuf: compression.NewSyncDecodeBuffer()}
}
// Next advances the reader to the next record and returns true if one exists.
// It must not be called again after it has returned false.
func (r *Reader) Next() bool {
err := r.nextNew()
if err != nil && errors.Is(err, io.EOF) {
// The last WAL segment record shouldn't be torn (it should be full or last).
// The last record would only be torn after a crash just before
// the last record part could be persisted to disk.
if r.curRecTyp == recFirst || r.curRecTyp == recMiddle {
r.err = errors.New("last record is torn")
}
return false
}
r.err = err
return r.err == nil
}
func (r *Reader) nextNew() (err error) {
// We have to use r.buf since allocating byte arrays here fails escape
// analysis and ends up on the heap, even though it seemingly should not.
hdr := r.buf[:recordHeaderSize]
buf := r.buf[recordHeaderSize:]
r.precomprBuf = r.precomprBuf[:0]
i := 0
for {
if _, err = io.ReadFull(r.rdr, hdr[:1]); err != nil {
return fmt.Errorf("read first header byte: %w", err)
}
r.total++
r.curRecTyp = recTypeFromHeader(hdr[0])
compr := compression.None
if hdr[0]&snappyMask == snappyMask {
compr = compression.Snappy
} else if hdr[0]&zstdMask == zstdMask {
compr = compression.Zstd
}
// Gobble up zero bytes.
if r.curRecTyp == recPageTerm {
// recPageTerm is a single byte that indicates the rest of the page is padded.
// If it's the first byte in a page, buf is too small and
// needs to be resized to fit pageSize-1 bytes.
buf = r.buf[1:]
// We are pedantic and check whether the zeros are actually up
// to a page boundary.
// It's not strictly necessary but may catch sketchy state early.
k := pageSize - (r.total % pageSize)
if k == pageSize {
continue // Initial 0 byte was last page byte.
}
n, err := io.ReadFull(r.rdr, buf[:k])
if err != nil {
return fmt.Errorf("read remaining zeros: %w", err)
}
r.total += int64(n)
for _, c := range buf[:k] {
if c != 0 {
return errors.New("unexpected non-zero byte in padded page")
}
}
continue
}
n, err := io.ReadFull(r.rdr, hdr[1:])
if err != nil {
return fmt.Errorf("read remaining header: %w", err)
}
r.total += int64(n)
var (
length = binary.BigEndian.Uint16(hdr[1:])
crc = binary.BigEndian.Uint32(hdr[3:])
)
if length > pageSize-recordHeaderSize {
return fmt.Errorf("invalid record size %d", length)
}
n, err = io.ReadFull(r.rdr, buf[:length])
if err != nil {
return err
}
r.total += int64(n)
if n != int(length) {
return fmt.Errorf("invalid size: expected %d, got %d", length, n)
}
if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc {
return fmt.Errorf("unexpected checksum %x, expected %x", c, crc)
}
if err := validateRecord(r.curRecTyp, i); err != nil {
return err
}
r.precomprBuf = append(r.precomprBuf, buf[:length]...)
if r.curRecTyp == recLast || r.curRecTyp == recFull {
r.rec, err = compression.Decode(compr, r.precomprBuf, r.decBuf)
return err
}
// Only increment i for non-zero records since we use it
// to determine valid content record sequences.
i++
}
}
// Err returns the last encountered error wrapped in a corruption error.
// If the reader does not allow inferring a segment index and offset, a total
// offset in the reader stream will be provided.
func (r *Reader) Err() error {
if r.err == nil {
return nil
}
if b, ok := r.rdr.(*segmentBufReader); ok {
return &CorruptionErr{
Err: r.err,
Dir: b.segs[b.cur].Dir(),
Segment: b.segs[b.cur].Index(),
Offset: int64(b.off),
}
}
return &CorruptionErr{
Err: r.err,
Segment: -1,
Offset: r.total,
}
}
// Record returns the current record. The returned byte slice is only
// valid until the next call to Next.
func (r *Reader) Record() []byte {
return r.rec
}
// Segment returns the current segment being read.
func (r *Reader) Segment() int {
if b, ok := r.rdr.(*segmentBufReader); ok {
return b.segs[b.cur].Index()
}
return -1
}
// Offset returns the current position of the segment being read.
func (r *Reader) Offset() int64 {
if b, ok := r.rdr.(*segmentBufReader); ok {
return int64(b.off)
}
return r.total
}
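// Typical read loop (editor's illustration, not part of the upstream file;
// process is a hypothetical callback):
//
//	sr, err := NewSegmentsReader(dir)
//	if err != nil {
//		// handle error
//	}
//	defer sr.Close()
//	r := NewReader(sr)
//	for r.Next() {
//		process(r.Record()) // the slice is only valid until the next Next call.
//	}
//	if err := r.Err(); err != nil {
//		// err is a *CorruptionErr suitable for WL.Repair.
//	}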
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/reader_test.go | tsdb/wlog/reader_test.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"math/big"
"os"
"path/filepath"
"runtime"
"strconv"
"testing"
"time"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/util/compression"
)
type reader interface {
Next() bool
Err() error
Record() []byte
Offset() int64
}
type rec struct {
t recType
b []byte
}
var readerConstructors = map[string]func(io.Reader) reader{
"Reader": func(r io.Reader) reader {
return NewReader(r)
},
"LiveReader": func(r io.Reader) reader {
lr := NewLiveReader(promslog.NewNopLogger(), NewLiveReaderMetrics(nil), r)
lr.eofNonErr = true
return lr
},
}
var (
data = make([]byte, 100000)
testReaderCases = []struct {
t []rec
exp [][]byte
fail bool
}{
// Sequence of valid records.
{
t: []rec{
{recFull, data[0:200]},
{recFirst, data[200:300]},
{recLast, data[300:400]},
{recFirst, data[400:800]},
{recMiddle, data[800:900]},
{recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary.
{recLast, data[900:900]},
{recFirst, data[900:1000]},
{recMiddle, data[1000:1200]},
{recMiddle, data[1200:30000]},
{recMiddle, data[30000:30001]},
{recMiddle, data[30001:30001]},
{recLast, data[30001:32000]},
},
exp: [][]byte{
data[0:200],
data[200:400],
data[400:900],
data[900:32000],
},
},
// Exactly at the limit of one page minus the header size.
{
t: []rec{
{recFull, data[0 : pageSize-recordHeaderSize]},
},
exp: [][]byte{
data[:pageSize-recordHeaderSize],
},
},
// More than a full page; this exceeds our buffer and can never happen
// when written by the WAL.
{
t: []rec{
{recFull, data[0 : pageSize+1]},
},
fail: true,
},
// Two records that together are too big for a page.
// NB: currently the non-live reader succeeds on this. It is arguably a bug,
// but we've seen it in production.
{
t: []rec{
{recFull, data[:pageSize/2]},
{recFull, data[:pageSize/2]},
},
exp: [][]byte{
data[:pageSize/2],
data[:pageSize/2],
},
},
// Invalid orders of record types.
{
t: []rec{{recMiddle, data[:200]}},
fail: true,
},
{
t: []rec{{recLast, data[:200]}},
fail: true,
},
{
t: []rec{
{recFirst, data[:200]},
{recFull, data[200:400]},
},
fail: true,
},
{
t: []rec{
{recFirst, data[:100]},
{recMiddle, data[100:200]},
{recFull, data[200:400]},
},
fail: true,
},
// Non-zero data after page termination.
{
t: []rec{
{recFull, data[:100]},
{recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)},
},
exp: [][]byte{data[:100]},
fail: true,
},
}
)
func encodedRecord(t recType, b []byte) []byte {
if t == recPageTerm {
return append([]byte{0}, b...)
}
r := make([]byte, recordHeaderSize)
r[0] = byte(t)
binary.BigEndian.PutUint16(r[1:], uint16(len(b)))
binary.BigEndian.PutUint32(r[3:], crc32.Checksum(b, castagnoliTable))
return append(r, b...)
}
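// decodeRecordHeader is the inverse of encodedRecord and exists purely as an
// illustrative sketch of the on-disk header layout used above: byte 0 holds
// the record type, bytes 1-2 the big-endian payload length, and bytes 3-6
// the Castagnoli CRC32 of the payload. It is not used by the tests.
func decodeRecordHeader(data []byte) (recType, []byte, bool) {
if len(data) < recordHeaderSize {
return 0, nil, false
}
typ := recType(data[0])
length := int(binary.BigEndian.Uint16(data[1:]))
crc := binary.BigEndian.Uint32(data[3:])
if len(data) < recordHeaderSize+length {
return 0, nil, false
}
payload := data[recordHeaderSize : recordHeaderSize+length]
// Reject the record if the checksum over the payload does not match.
if crc32.Checksum(payload, castagnoliTable) != crc {
return 0, nil, false
}
return typ, payload, true
}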
// TestReader feeds the reader a stream of encoded records with different types.
func TestReader(t *testing.T) {
for name, fn := range readerConstructors {
for i, c := range testReaderCases {
t.Run(fmt.Sprintf("%s/%d", name, i), func(t *testing.T) {
var buf []byte
for _, r := range c.t {
buf = append(buf, encodedRecord(r.t, r.b)...)
}
r := fn(bytes.NewReader(buf))
for j := 0; r.Next(); j++ {
t.Logf("record %d", j)
rec := r.Record()
require.Less(t, j, len(c.exp), "received more records than expected")
require.Equal(t, c.exp[j], rec, "Bytes within record did not match expected Bytes")
}
if !c.fail {
require.NoError(t, r.Err())
} else {
require.Error(t, r.Err())
}
})
}
}
}
func TestReader_Live(t *testing.T) {
logger := promslog.NewNopLogger()
for i := range testReaderCases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
writeFd, err := os.CreateTemp("", "TestReader_Live")
require.NoError(t, err)
defer os.Remove(writeFd.Name())
go func(i int) {
for _, rec := range testReaderCases[i].t {
rec := encodedRecord(rec.t, rec.b)
_, err := writeFd.Write(rec)
require.NoError(t, err)
runtime.Gosched()
}
writeFd.Close()
}(i)
// Read from a second FD on the same file.
readFd, err := os.Open(writeFd.Name())
require.NoError(t, err)
reader := NewLiveReader(logger, NewLiveReaderMetrics(nil), readFd)
for _, exp := range testReaderCases[i].exp {
for !reader.Next() {
require.Equal(t, io.EOF, reader.Err(), "expect EOF, got: %v", reader.Err())
runtime.Gosched()
}
actual := reader.Record()
require.Equal(t, exp, actual, "read wrong record")
}
require.False(t, reader.Next(), "unexpected record")
if testReaderCases[i].fail {
require.Error(t, reader.Err())
}
})
}
}
const fuzzLen = 500
func generateRandomEntries(w *WL, records chan []byte) error {
var recs [][]byte
for i := range fuzzLen {
var sz int64
switch i % 5 {
case 0, 1:
sz = 50
case 2, 3:
sz = pageSize
default:
sz = pageSize * 8
}
n, err := rand.Int(rand.Reader, big.NewInt(sz))
if err != nil {
return err
}
rec := make([]byte, n.Int64())
if _, err := rand.Read(rec); err != nil {
return err
}
records <- rec
// Randomly batch up records.
recs = append(recs, rec)
n, err = rand.Int(rand.Reader, big.NewInt(int64(4)))
if err != nil {
return err
}
if int(n.Int64()) < 3 {
if err := w.Log(recs...); err != nil {
return err
}
recs = recs[:0]
}
}
return w.Log(recs...)
}
type multiReadCloser struct {
reader io.Reader
closers []io.Closer
}
func (m *multiReadCloser) Read(p []byte) (n int, err error) {
return m.reader.Read(p)
}
func (m *multiReadCloser) Close() error {
return tsdb_errors.NewMulti(tsdb_errors.CloseAll(m.closers)).Err()
}
func allSegments(dir string) (io.ReadCloser, error) {
seg, err := listSegments(dir)
if err != nil {
return nil, err
}
var readers []io.Reader
var closers []io.Closer
for _, r := range seg {
f, err := os.Open(filepath.Join(dir, r.name))
if err != nil {
return nil, err
}
readers = append(readers, f)
closers = append(closers, f)
}
return &multiReadCloser{
reader: io.MultiReader(readers...),
closers: closers,
}, nil
}
func TestReaderFuzz(t *testing.T) {
t.Parallel()
for name, fn := range readerConstructors {
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("%s,compress=%s", name, compress), func(t *testing.T) {
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, 128*pageSize, compress)
require.NoError(t, err)
// Buffering required as we're not reading concurrently.
input := make(chan []byte, fuzzLen)
err = generateRandomEntries(w, input)
require.NoError(t, err)
close(input)
err = w.Close()
require.NoError(t, err)
sr, err := allSegments(w.Dir())
require.NoError(t, err)
defer sr.Close()
reader := fn(sr)
for expected := range input {
require.True(t, reader.Next(), "expected record: %v", reader.Err())
r := reader.Record()
// Expected value may come as nil or empty slice, so it requires special comparison.
if len(expected) == 0 {
require.Empty(t, r)
} else {
require.Equal(t, expected, r, "read wrong record")
}
}
require.False(t, reader.Next(), "unexpected record")
})
}
}
}
func TestReaderFuzz_Live(t *testing.T) {
t.Parallel()
logger := promslog.NewNopLogger()
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, 128*pageSize, compress)
require.NoError(t, err)
defer w.Close()
// In the background, generate a stream of random records and write them
// to the WAL.
input := make(chan []byte, fuzzLen/10) // buffering required as we sometimes batch WAL writes.
done := make(chan struct{})
go func() {
err := generateRandomEntries(w, input)
require.NoError(t, err)
time.Sleep(100 * time.Millisecond)
close(done)
}()
// Tail the WAL and compare the results.
m, _, err := Segments(w.Dir())
require.NoError(t, err)
seg, err := OpenReadSegment(SegmentName(dir, m))
require.NoError(t, err)
defer seg.Close()
r := NewLiveReader(logger, nil, seg)
segmentTicker := time.NewTicker(100 * time.Millisecond)
readTicker := time.NewTicker(10 * time.Millisecond)
readSegment := func(r *LiveReader) bool {
for r.Next() {
rec := r.Record()
expected, ok := <-input
require.True(t, ok, "unexpected record")
// Expected value may come as nil or empty slice, so it requires special comparison.
if len(expected) == 0 {
require.Empty(t, rec)
} else {
require.Equal(t, expected, rec, "record does not match expected")
}
}
require.Equal(t, io.EOF, r.Err(), "expected EOF, got: %v", r.Err())
return true
}
outer:
for {
select {
case <-segmentTicker.C:
// check if new segments exist
_, last, err := Segments(w.Dir())
require.NoError(t, err)
if last <= seg.i {
continue
}
// read to end of segment.
readSegment(r)
fi, err := os.Stat(SegmentName(dir, seg.i))
require.NoError(t, err)
require.Equal(t, r.Offset(), fi.Size(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size())
seg, err = OpenReadSegment(SegmentName(dir, seg.i+1))
require.NoError(t, err)
defer seg.Close()
r = NewLiveReader(logger, nil, seg)
case <-readTicker.C:
readSegment(r)
case <-done:
readSegment(r)
break outer
}
}
require.Equal(t, io.EOF, r.Err(), "expected EOF")
})
}
}
func TestLiveReaderCorrupt_ShortFile(t *testing.T) {
// Write a corrupt WAL segment, there is one record of pageSize in length,
// but the segment is only half written.
logger := promslog.NewNopLogger()
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, pageSize, compression.None)
require.NoError(t, err)
rec := make([]byte, pageSize-recordHeaderSize)
_, err = rand.Read(rec)
require.NoError(t, err)
err = w.Log(rec)
require.NoError(t, err)
err = w.Close()
require.NoError(t, err)
segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666)
require.NoError(t, err)
err = segmentFile.Truncate(pageSize / 2)
require.NoError(t, err)
err = segmentFile.Close()
require.NoError(t, err)
// Try and LiveReader it.
m, _, err := Segments(w.Dir())
require.NoError(t, err)
seg, err := OpenReadSegment(SegmentName(dir, m))
require.NoError(t, err)
defer seg.Close()
r := NewLiveReader(logger, nil, seg)
require.False(t, r.Next(), "expected no records")
require.Equal(t, io.EOF, r.Err(), "expected error, got: %v", r.Err())
}
func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) {
// Write a corrupt WAL segment, when record len > page size.
logger := promslog.NewNopLogger()
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, pageSize*2, compression.None)
require.NoError(t, err)
rec := make([]byte, pageSize-recordHeaderSize)
_, err = rand.Read(rec)
require.NoError(t, err)
err = w.Log(rec)
require.NoError(t, err)
err = w.Close()
require.NoError(t, err)
segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666)
require.NoError(t, err)
// Override the record length
buf := make([]byte, 3)
buf[0] = byte(recFull)
binary.BigEndian.PutUint16(buf[1:], 0xFFFF)
_, err = segmentFile.WriteAt(buf, 0)
require.NoError(t, err)
err = segmentFile.Close()
require.NoError(t, err)
// Try and LiveReader it.
m, _, err := Segments(w.Dir())
require.NoError(t, err)
seg, err := OpenReadSegment(SegmentName(dir, m))
require.NoError(t, err)
defer seg.Close()
r := NewLiveReader(logger, NewLiveReaderMetrics(nil), seg)
require.False(t, r.Next(), "expected no records")
require.EqualError(t, r.Err(), "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err())
}
func TestReaderData(t *testing.T) {
dir := os.Getenv("WALDIR")
if dir == "" {
return
}
for name, fn := range readerConstructors {
t.Run(name, func(t *testing.T) {
w, err := New(nil, nil, dir, compression.Snappy)
require.NoError(t, err)
sr, err := allSegments(dir)
require.NoError(t, err)
reader := fn(sr)
for reader.Next() {
}
require.NoError(t, reader.Err())
err = w.Repair(reader.Err())
require.NoError(t, err)
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/watcher_test.go | tsdb/wlog/watcher_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"fmt"
"math/rand"
"os"
"path"
"runtime"
"sync"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/util/compression"
)
var (
defaultRetryInterval = 100 * time.Millisecond
defaultRetries = 100
wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer)
)
// retry executes f() every interval until it returns true, giving up after
// n+1 attempts.
func retry(t *testing.T, interval time.Duration, n int, f func() bool) {
t.Helper()
ticker := time.NewTicker(interval)
defer ticker.Stop()
for i := 0; i <= n; i++ {
if f() {
return
}
<-ticker.C
}
t.Logf("function returned false")
}
// Overwrite readTimeout defined in watcher.go.
func overwriteReadTimeout(t *testing.T, val time.Duration) {
initialVal := readTimeout
readTimeout = val
t.Cleanup(func() { readTimeout = initialVal })
}
type writeToMock struct {
samplesAppended int
exemplarsAppended int
histogramsAppended int
floatHistogramsAppended int
seriesLock sync.Mutex
seriesSegmentIndexes map[chunks.HeadSeriesRef]int
// If nonzero, delay each append/store call with a short sleep to simulate a slow consumer.
delay time.Duration
}
func (wtm *writeToMock) Append(s []record.RefSample) bool {
time.Sleep(wtm.delay)
wtm.samplesAppended += len(s)
return true
}
func (wtm *writeToMock) AppendExemplars(e []record.RefExemplar) bool {
time.Sleep(wtm.delay)
wtm.exemplarsAppended += len(e)
return true
}
func (wtm *writeToMock) AppendHistograms(h []record.RefHistogramSample) bool {
time.Sleep(wtm.delay)
wtm.histogramsAppended += len(h)
return true
}
func (wtm *writeToMock) AppendFloatHistograms(fh []record.RefFloatHistogramSample) bool {
time.Sleep(wtm.delay)
wtm.floatHistogramsAppended += len(fh)
return true
}
func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) {
time.Sleep(wtm.delay)
wtm.UpdateSeriesSegment(series, index)
}
func (*writeToMock) StoreMetadata([]record.RefMetadata) { /* no-op */ }
func (wtm *writeToMock) UpdateSeriesSegment(series []record.RefSeries, index int) {
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()
for _, s := range series {
wtm.seriesSegmentIndexes[s.Ref] = index
}
}
func (wtm *writeToMock) SeriesReset(index int) {
// Drop series whose most recent segment index is older than the
// checkpoint, i.e. series that were not also present in the checkpoint.
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()
for k, v := range wtm.seriesSegmentIndexes {
if v < index {
delete(wtm.seriesSegmentIndexes, k)
}
}
}
func (wtm *writeToMock) checkNumSeries() int {
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()
return len(wtm.seriesSegmentIndexes)
}
func newWriteToMock(delay time.Duration) *writeToMock {
return &writeToMock{
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
delay: delay,
}
}
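// Together, UpdateSeriesSegment and SeriesReset implement the watcher's
// garbage-collection handshake: while re-reading a checkpoint, the watcher
// re-marks live series with the checkpoint's segment index and then calls
// SeriesReset(index+1) so anything still pointing at an older segment is
// dropped. A minimal sketch of the caller side, mirroring
// TestCheckpointSeriesReset below (illustrative only):
//
//	_, cpi, err := LastCheckpoint(walDir)
//	// ... handle err ...
//	err = watcher.garbageCollectSeries(cpi + 1)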
func TestTailSamples(t *testing.T) {
pageSize := 32 * 1024
const seriesCount = 10
const samplesCount = 250
const exemplarsCount = 25
const histogramsCount = 50
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
now := time.Now()
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, 128*pageSize, compress)
require.NoError(t, err)
defer func() {
require.NoError(t, w.Close())
}()
// Write to the initial segment then checkpoint.
for i := range seriesCount {
ref := i + 100
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
for range exemplarsCount {
inner := rand.Intn(ref + 1)
exemplar := enc.Exemplars([]record.RefExemplar{
{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
V: float64(i),
Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", inner)),
},
}, nil)
require.NoError(t, w.Log(exemplar))
}
for range histogramsCount {
inner := rand.Intn(ref + 1)
hist := &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
}
histograms, _ := enc.HistogramSamples([]record.RefHistogramSample{{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
H: hist,
}}, nil)
require.NoError(t, w.Log(histograms))
customBucketHist := &histogram.Histogram{
Schema: -53,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
CustomValues: []float64{float64(i) + 2},
}
customBucketHistograms := enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
H: customBucketHist,
}}, nil)
require.NoError(t, w.Log(customBucketHistograms))
floatHistograms, _ := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
FH: hist.ToFloat(nil),
}}, nil)
require.NoError(t, w.Log(floatHistograms))
customBucketFloatHistograms := enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
FH: customBucketHist.ToFloat(nil),
}}, nil)
require.NoError(t, w.Log(customBucketFloatHistograms))
}
}
// Start read after checkpoint, no more data written.
first, last, err := Segments(w.Dir())
require.NoError(t, err)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true, true)
watcher.SetStartTime(now)
// Set the Watcher's metrics so they're not nil pointers.
watcher.SetMetrics()
for i := first; i <= last; i++ {
segment, err := OpenReadSegment(SegmentName(watcher.walDir, i))
require.NoError(t, err)
reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment)
// Use tail true so we can ensure we got the right number of samples.
watcher.readSegment(reader, i, true)
require.NoError(t, segment.Close())
}
expectedSeries := seriesCount
expectedSamples := seriesCount * samplesCount
expectedExemplars := seriesCount * exemplarsCount
expectedHistograms := seriesCount * histogramsCount * 2
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() >= expectedSeries
})
require.Equal(t, expectedSeries, wt.checkNumSeries(), "did not receive the expected number of series")
require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples")
require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars")
require.Equal(t, expectedHistograms, wt.histogramsAppended, "did not receive the expected number of histograms")
require.Equal(t, expectedHistograms, wt.floatHistogramsAppended, "did not receive the expected number of float histograms")
})
}
}
func TestReadToEndNoCheckpoint(t *testing.T) {
pageSize := 32 * 1024
const seriesCount = 10
const samplesCount = 250
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
w, err := NewSize(nil, nil, wdir, 128*pageSize, compress)
require.NoError(t, err)
defer func() {
require.NoError(t, w.Close())
}()
var recs [][]byte
enc := record.Encoder{}
for i := range seriesCount {
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
recs = append(recs, series)
for j := range samplesCount {
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(j),
T: int64(i),
V: float64(i),
},
}, nil)
recs = append(recs, sample)
// Randomly batch up records.
if rand.Intn(4) < 3 {
require.NoError(t, w.Log(recs...))
recs = recs[:0]
}
}
}
require.NoError(t, w.Log(recs...))
overwriteReadTimeout(t, time.Second)
_, _, err = Segments(w.Dir())
require.NoError(t, err)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
expected := seriesCount
require.Eventually(t, func() bool {
return wt.checkNumSeries() == expected
}, 20*time.Second, 1*time.Second)
watcher.Stop()
})
}
}
func TestReadToEndWithCheckpoint(t *testing.T) {
segmentSize := 32 * 1024
// We need roughly this many series and samples
// to produce enough segments for us to checkpoint.
const seriesCount = 10
const samplesCount = 250
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, segmentSize, compress)
require.NoError(t, err)
defer func() {
require.NoError(t, w.Close())
}()
// Write to the initial segment then checkpoint.
for i := range seriesCount {
ref := i + 100
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
// Add in an unknown record type, which should be ignored.
require.NoError(t, w.Log([]byte{255}))
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
_, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)
require.NoError(t, w.Truncate(1))
// Write more records after checkpointing.
for i := range seriesCount {
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for j := range samplesCount {
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(j),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
_, _, err = Segments(w.Dir())
require.NoError(t, err)
overwriteReadTimeout(t, time.Second)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
expected := seriesCount * 2
require.Eventually(t, func() bool {
return wt.checkNumSeries() == expected
}, 10*time.Second, 1*time.Second)
watcher.Stop()
})
}
}
func TestReadCheckpoint(t *testing.T) {
t.Parallel()
pageSize := 32 * 1024
const seriesCount = 10
const samplesCount = 250
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
f, err := os.Create(SegmentName(wdir, 30))
require.NoError(t, err)
require.NoError(t, f.Close())
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, 128*pageSize, compress)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, w.Close())
})
// Write to the initial segment then checkpoint.
for i := range seriesCount {
ref := i + 100
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
_, err = w.NextSegmentSync()
require.NoError(t, err)
_, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)
require.NoError(t, w.Truncate(32))
// Start read after checkpoint, no more data written.
_, _, err = Segments(w.Dir())
require.NoError(t, err)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
expectedSeries := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() >= expectedSeries
})
watcher.Stop()
require.Equal(t, expectedSeries, wt.checkNumSeries())
})
}
}
func TestReadCheckpointMultipleSegments(t *testing.T) {
pageSize := 32 * 1024
const segments = 1
const seriesCount = 20
const samplesCount = 300
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, pageSize, compress)
require.NoError(t, err)
// Write a bunch of data.
for i := range segments {
for j := range seriesCount {
ref := j + (i * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
}
require.NoError(t, w.Close())
// At this point we should have at least 6 segments; let's create a checkpoint dir of the first 5.
checkpointDir := dir + "/wal/checkpoint.000004"
err = os.Mkdir(checkpointDir, 0o777)
require.NoError(t, err)
for i := 0; i <= 4; i++ {
err := os.Rename(SegmentName(dir+"/wal", i), SegmentName(checkpointDir, i))
require.NoError(t, err)
}
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1
// Set the Watcher's metrics so they're not nil pointers.
watcher.SetMetrics()
lastCheckpoint, _, err := LastCheckpoint(watcher.walDir)
require.NoError(t, err)
err = watcher.readCheckpoint(lastCheckpoint, (*Watcher).readSegment)
require.NoError(t, err)
})
}
}
func TestCheckpointSeriesReset(t *testing.T) {
segmentSize := 32 * 1024
// We need roughly this many series and samples
// to produce enough segments for us to checkpoint.
const seriesCount = 20
const samplesCount = 350
testCases := []struct {
compress compression.Type
segments int
}{
{compress: compression.None, segments: 14},
{compress: compression.Snappy, segments: 13},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("compress=%s", tc.compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, segmentSize, tc.compress)
require.NoError(t, err)
defer func() {
require.NoError(t, w.Close())
}()
// Write to the initial segment, then checkpoint later.
for i := range seriesCount {
ref := i + 100
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
_, _, err = Segments(w.Dir())
require.NoError(t, err)
overwriteReadTimeout(t, time.Second)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1
go watcher.Start()
expected := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() >= expected
})
require.Eventually(t, func() bool {
return wt.checkNumSeries() == seriesCount
}, 10*time.Second, 1*time.Second)
_, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)
err = w.Truncate(5)
require.NoError(t, err)
_, cpi, err := LastCheckpoint(path.Join(dir, "wal"))
require.NoError(t, err)
err = watcher.garbageCollectSeries(cpi + 1)
require.NoError(t, err)
watcher.Stop()
// If you modify the checkpoint and truncate segment numbers, re-run the test
// to see how many series records you end up with, and change the last Equals
// check accordingly (or relax it to assert wt.checkNumSeries() < seriesCount*10).
require.Eventually(t, func() bool {
return wt.checkNumSeries() == tc.segments
}, 20*time.Second, 1*time.Second)
})
}
}
func TestRun_StartupTime(t *testing.T) {
t.Parallel()
const pageSize = 32 * 1024
const segments = 10
const seriesCount = 20
const samplesCount = 300
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, pageSize, compress)
require.NoError(t, err)
for i := range segments {
for j := range seriesCount {
ref := j + (i * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
}
require.NoError(t, w.Close())
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segments
watcher.SetMetrics()
startTime := time.Now()
err = watcher.Run()
require.Less(t, time.Since(startTime), readTimeout)
require.NoError(t, err)
})
}
}
func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error {
enc := record.Encoder{}
for j := range seriesCount {
ref := j + (segment * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)),
},
}, nil)
if err := w.Log(series); err != nil {
return err
}
for range samplesCount {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(segment),
V: float64(segment),
},
}, nil)
if err := w.Log(sample); err != nil {
return err
}
}
}
return nil
}
func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms.
t.SkipNow()
}
const segmentSize = pageSize // Smallest allowed segment size.
const segmentsToWrite = 5
const segmentsToRead = segmentsToWrite - 1
const seriesCount = 10
const samplesCount = 50
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
w, err := NewSize(nil, nil, wdir, segmentSize, compress)
require.NoError(t, err)
// Write to 00000000, the watcher will read series from it.
require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount))
// Create 00000001, the watcher will tail it once started.
w.NextSegment()
// Set up the watcher and run it in the background.
wt := newWriteToMock(time.Millisecond)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.SetMetrics()
watcher.MaxSegment = segmentsToRead
var g errgroup.Group
g.Go(func() error {
startTime := time.Now()
err = watcher.Run()
if err != nil {
return err
}
// If the watcher were to wait for readTicker to read every new segment, it would need readTimeout * segmentsToRead.
d := time.Since(startTime)
if d > readTimeout {
return fmt.Errorf("watcher ran for %s, it shouldn't rely on readTicker=%s to read the new segments", d, readTimeout)
}
return nil
})
// The watcher went through 00000000 and is tailing the next one.
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() == seriesCount
})
// In the meantime, add some new segments in bulk.
// We should end up with segmentsToWrite + 1 segments now.
for i := 1; i < segmentsToWrite; i++ {
require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
w.NextSegment()
}
// Wait for the watcher.
require.NoError(t, g.Wait())
// All series and samples were read.
require.Equal(t, (segmentsToRead+1)*seriesCount, wt.checkNumSeries()) // Series from 00000000 are also read.
require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
require.NoError(t, w.Close())
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/wlog_test.go | tsdb/wlog/wlog_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"os"
"path/filepath"
"testing"
"github.com/prometheus/client_golang/prometheus"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/util/compression"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
// TestWALRepair_ReadingError ensures that a repair is run for an error
// when reading a record.
func TestWALRepair_ReadingError(t *testing.T) {
for name, test := range map[string]struct {
corrSgm int // Which segment to corrupt.
corrFunc func(f *os.File) // Func that applies the corruption.
intactRecs int // Total expected records left after the repair.
}{
"torn_last_record": {
2,
func(f *os.File) {
_, err := f.Seek(pageSize*2, 0)
require.NoError(t, err)
_, err = f.Write([]byte{byte(recFirst)})
require.NoError(t, err)
},
8,
},
// Ensures that the page buffer is big enough to fit
// an entire page size without panicking.
// https://github.com/prometheus/tsdb/pull/414
"bad_header": {
1,
func(f *os.File) {
_, err := f.Seek(pageSize, 0)
require.NoError(t, err)
_, err = f.Write([]byte{byte(recPageTerm)})
require.NoError(t, err)
},
4,
},
"bad_fragment_sequence": {
1,
func(f *os.File) {
_, err := f.Seek(pageSize, 0)
require.NoError(t, err)
_, err = f.Write([]byte{byte(recLast)})
require.NoError(t, err)
},
4,
},
"bad_fragment_flag": {
1,
func(f *os.File) {
_, err := f.Seek(pageSize, 0)
require.NoError(t, err)
_, err = f.Write([]byte{123})
require.NoError(t, err)
},
4,
},
"bad_checksum": {
1,
func(f *os.File) {
_, err := f.Seek(pageSize+4, 0)
require.NoError(t, err)
_, err = f.Write([]byte{0})
require.NoError(t, err)
},
4,
},
"bad_length": {
1,
func(f *os.File) {
_, err := f.Seek(pageSize+2, 0)
require.NoError(t, err)
_, err = f.Write([]byte{0})
require.NoError(t, err)
},
4,
},
"bad_content": {
1,
func(f *os.File) {
_, err := f.Seek(pageSize+100, 0)
require.NoError(t, err)
_, err = f.Write([]byte("beef"))
require.NoError(t, err)
},
4,
},
} {
t.Run(name, func(t *testing.T) {
dir := t.TempDir()
// We create 3 segments with 3 records each and
// then corrupt a given record in a given segment.
// As a result we want a repaired WAL with given intact records.
segSize := 3 * pageSize
w, err := NewSize(nil, nil, dir, segSize, compression.None)
require.NoError(t, err)
var records [][]byte
for i := 1; i <= 9; i++ {
b := make([]byte, pageSize-recordHeaderSize)
b[0] = byte(i)
records = append(records, b)
require.NoError(t, w.Log(b))
}
first, last, err := Segments(w.Dir())
require.NoError(t, err)
require.Equal(t, 3, 1+last-first, "wlog creation didn't result in expected number of segments")
require.NoError(t, w.Close())
f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0o666)
require.NoError(t, err)
// Apply corruption function.
test.corrFunc(f)
require.NoError(t, f.Close())
w, err = NewSize(nil, nil, dir, segSize, compression.None)
require.NoError(t, err)
defer w.Close()
first, last, err = Segments(w.Dir())
require.NoError(t, err)
// Backfill segments from the most recent checkpoint onwards.
for i := first; i <= last; i++ {
s, err := OpenReadSegment(SegmentName(w.Dir(), i))
require.NoError(t, err)
sr := NewSegmentBufReader(s)
r := NewReader(sr)
for r.Next() {
}
// Close the segment so we don't break things on Windows.
s.Close()
// No corruption in this segment.
if r.Err() == nil {
continue
}
require.NoError(t, w.Repair(r.Err()))
break
}
sr, err := NewSegmentsReader(dir)
require.NoError(t, err)
defer sr.Close()
r := NewReader(sr)
var result [][]byte
for r.Next() {
var b []byte
result = append(result, append(b, r.Record()...))
}
require.NoError(t, r.Err())
require.Len(t, result, test.intactRecs, "Wrong number of intact records")
for i, r := range result {
require.True(t, bytes.Equal(records[i], r), "record %d diverges: want %x, got %x", i, records[i][:10], r[:10])
}
// Make sure there is a new 0 size Segment after the corrupted Segment.
_, last, err = Segments(w.Dir())
require.NoError(t, err)
require.Equal(t, test.corrSgm+1, last)
fi, err := os.Stat(SegmentName(dir, last))
require.NoError(t, err)
require.Equal(t, int64(0), fi.Size())
})
}
}
// TestCorruptAndCarryOn writes a multi-segment WAL, corrupts the first segment, and
// ensures that an error during reading that segment is correctly repaired before
// more records are written to the WAL.
func TestCorruptAndCarryOn(t *testing.T) {
dir := t.TempDir()
var (
logger = promslog.NewNopLogger()
segmentSize = pageSize * 3
recordSize = (pageSize / 3) - recordHeaderSize
)
// Produce a WAL with two segments of 3 pages holding 3 records each,
// so that when we truncate the file we're guaranteed to split a record.
{
w, err := NewSize(logger, nil, dir, segmentSize, compression.None)
require.NoError(t, err)
for range 18 {
buf := make([]byte, recordSize)
_, err := rand.Read(buf)
require.NoError(t, err)
err = w.Log(buf)
require.NoError(t, err)
}
err = w.Close()
require.NoError(t, err)
}
// Check all the segments are the correct size.
{
segments, err := listSegments(dir)
require.NoError(t, err)
for _, segment := range segments {
f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0o666)
require.NoError(t, err)
fi, err := f.Stat()
require.NoError(t, err)
t.Log("segment", segment.index, "size", fi.Size())
require.Equal(t, int64(segmentSize), fi.Size())
err = f.Close()
require.NoError(t, err)
}
}
// Truncate the first file, splitting the middle record in the second
// page in half, leaving 4 valid records.
{
f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0o666)
require.NoError(t, err)
fi, err := f.Stat()
require.NoError(t, err)
require.Equal(t, int64(segmentSize), fi.Size())
err = f.Truncate(int64(segmentSize / 2))
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
}
// Now try and repair this WAL, and write 5 more records to it.
{
sr, err := NewSegmentsReader(dir)
require.NoError(t, err)
reader := NewReader(sr)
i := 0
for ; i < 4 && reader.Next(); i++ {
require.Len(t, reader.Record(), recordSize)
}
require.Equal(t, 4, i, "not enough records")
require.False(t, reader.Next(), "unexpected record")
corruptionErr := reader.Err()
require.Error(t, corruptionErr)
err = sr.Close()
require.NoError(t, err)
w, err := NewSize(logger, nil, dir, segmentSize, compression.None)
require.NoError(t, err)
err = w.Repair(corruptionErr)
require.NoError(t, err)
// Ensure that we have a completely clean slate after repairing.
require.Equal(t, 1, w.segment.Index()) // We corrupted segment 0.
require.Equal(t, 0, w.donePages)
for range 5 {
buf := make([]byte, recordSize)
_, err := rand.Read(buf)
require.NoError(t, err)
err = w.Log(buf)
require.NoError(t, err)
}
err = w.Close()
require.NoError(t, err)
}
// Replay the WAL. Should get 9 records.
{
sr, err := NewSegmentsReader(dir)
require.NoError(t, err)
reader := NewReader(sr)
i := 0
for ; i < 9 && reader.Next(); i++ {
require.Len(t, reader.Record(), recordSize)
}
require.Equal(t, 9, i, "wrong number of records")
require.False(t, reader.Next(), "unexpected record")
require.NoError(t, reader.Err())
sr.Close()
}
}
// TestClose ensures that calling Close more than once doesn't panic and doesn't block.
func TestClose(t *testing.T) {
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, pageSize, compression.None)
require.NoError(t, err)
require.NoError(t, w.Close())
require.Error(t, w.Close())
}
func TestSegmentMetric(t *testing.T) {
var (
segmentSize = pageSize
recordSize = (pageSize / 2) - recordHeaderSize
)
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, segmentSize, compression.None)
require.NoError(t, err)
initialSegment := client_testutil.ToFloat64(w.metrics.currentSegment)
// Write 3 records, each of which is half the segment size, meaning we should rotate to the next segment.
for range 3 {
buf := make([]byte, recordSize)
_, err := rand.Read(buf)
require.NoError(t, err)
err = w.Log(buf)
require.NoError(t, err)
}
require.Equal(t, initialSegment+1, client_testutil.ToFloat64(w.metrics.currentSegment), "segment metric did not increment after segment rotation")
require.NoError(t, w.Close())
}
func TestCompression(t *testing.T) {
t.Parallel()
bootstrap := func(compressed compression.Type) string {
const (
segmentSize = pageSize
recordSize = (pageSize / 2) - recordHeaderSize
records = 100
)
dirPath := t.TempDir()
w, err := NewSize(nil, nil, dirPath, segmentSize, compressed)
require.NoError(t, err)
buf := make([]byte, recordSize)
for range records {
require.NoError(t, w.Log(buf))
}
require.NoError(t, w.Close())
return dirPath
}
tmpDirs := make([]string, 0, 3)
defer func() {
for _, dir := range tmpDirs {
require.NoError(t, os.RemoveAll(dir))
}
}()
dirUnCompressed := bootstrap(compression.None)
tmpDirs = append(tmpDirs, dirUnCompressed)
for _, compressionType := range []compression.Type{compression.Snappy, compression.Zstd} {
dirCompressed := bootstrap(compressionType)
tmpDirs = append(tmpDirs, dirCompressed)
uncompressedSize, err := fileutil.DirSize(dirUnCompressed)
require.NoError(t, err)
compressedSize, err := fileutil.DirSize(dirCompressed)
require.NoError(t, err)
require.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize)
}
}
func TestLogPartialWrite(t *testing.T) {
const segmentSize = pageSize * 2
record := []byte{1, 2, 3, 4, 5}
tests := map[string]struct {
numRecords int
faultyRecord int
}{
"partial write when logging first record in a page": {
numRecords: 10,
faultyRecord: 1,
},
"partial write when logging record in the middle of a page": {
numRecords: 10,
faultyRecord: 3,
},
"partial write when logging last record of a page": {
numRecords: (pageSize / (recordHeaderSize + len(record))) + 10,
faultyRecord: pageSize / (recordHeaderSize + len(record)),
},
// TODO: the current implementation suffers from this case:
// "partial write when logging a record overlapping two pages": {
// numRecords: (pageSize / (recordHeaderSize + len(record))) + 10,
// faultyRecord: pageSize/(recordHeaderSize+len(record)) + 1,
// },
}
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
dirPath := t.TempDir()
w, err := NewSize(nil, nil, dirPath, segmentSize, compression.None)
require.NoError(t, err)
// Replace the underlying segment file with a mocked one that injects a failure.
w.segment.SegmentFile = &faultySegmentFile{
SegmentFile: w.segment.SegmentFile,
writeFailureAfter: ((recordHeaderSize + len(record)) * (testData.faultyRecord - 1)) + 2,
writeFailureErr: io.ErrShortWrite,
}
for i := 1; i <= testData.numRecords; i++ {
if err := w.Log(record); i == testData.faultyRecord {
require.ErrorIs(t, err, io.ErrShortWrite)
} else {
require.NoError(t, err)
}
}
require.NoError(t, w.Close())
// Read it back. We expect no corruption.
s, err := OpenReadSegment(SegmentName(dirPath, 0))
require.NoError(t, err)
defer func() { require.NoError(t, s.Close()) }()
r := NewReader(NewSegmentBufReader(s))
for i := 0; i < testData.numRecords; i++ {
require.True(t, r.Next())
require.NoError(t, r.Err())
require.Equal(t, record, r.Record())
}
require.False(t, r.Next())
require.NoError(t, r.Err())
})
}
}
type faultySegmentFile struct {
SegmentFile
written int
writeFailureAfter int
writeFailureErr error
}
func (f *faultySegmentFile) Write(p []byte) (int, error) {
if f.writeFailureAfter >= 0 && f.writeFailureAfter < f.written+len(p) {
partialLen := f.writeFailureAfter - f.written
if partialLen <= 0 || partialLen >= len(p) {
partialLen = 1
}
// Inject failure.
n, _ := f.SegmentFile.Write(p[:partialLen])
f.written += n
f.writeFailureAfter = -1
return n, f.writeFailureErr
}
// Proxy the write to the underlying file.
n, err := f.SegmentFile.Write(p)
f.written += n
return n, err
}
func BenchmarkWAL_LogBatched(b *testing.B) {
for _, compress := range compression.Types() {
b.Run(fmt.Sprintf("compress=%s", compress), func(b *testing.B) {
dir := b.TempDir()
w, err := New(nil, nil, dir, compress)
require.NoError(b, err)
defer w.Close()
var buf [2048]byte
var recs [][]byte
b.SetBytes(2048)
for b.Loop() {
recs = append(recs, buf[:])
if len(recs) < 1000 {
continue
}
err := w.Log(recs...)
require.NoError(b, err)
recs = recs[:0]
}
// Stop the timer so fsync time on close isn't counted.
// If it is counted, the batched and single-record benchmarks look very
// similar and do not show burst throughput well.
b.StopTimer()
})
}
}
func BenchmarkWAL_Log(b *testing.B) {
for _, compress := range compression.Types() {
b.Run(fmt.Sprintf("compress=%s", compress), func(b *testing.B) {
dir := b.TempDir()
w, err := New(nil, nil, dir, compress)
require.NoError(b, err)
defer w.Close()
var buf [2048]byte
b.SetBytes(2048)
for b.Loop() {
err := w.Log(buf[:])
require.NoError(b, err)
}
// Stop the timer so fsync time on close isn't counted.
// If it is counted, the batched and single-record benchmarks look very
// similar and do not show burst throughput well.
b.StopTimer()
})
}
}
func TestUnregisterMetrics(t *testing.T) {
reg := prometheus.NewRegistry()
for range 2 {
wl, err := New(promslog.NewNopLogger(), reg, t.TempDir(), compression.None)
require.NoError(t, err)
require.NoError(t, wl.Close())
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/checkpoint.go | tsdb/wlog/checkpoint.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"errors"
"fmt"
"io"
"log/slog"
"math"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
// CheckpointStats returns stats about a created checkpoint.
type CheckpointStats struct {
DroppedSeries int
DroppedSamples int // Includes histograms.
DroppedTombstones int
DroppedExemplars int
DroppedMetadata int
TotalSeries int // Processed series including dropped ones.
TotalSamples int // Processed float and histogram samples including dropped ones.
TotalTombstones int // Processed tombstones including dropped ones.
TotalExemplars int // Processed exemplars including dropped ones.
TotalMetadata int // Processed metadata including dropped ones.
}
// LastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, ErrNotFound is returned.
func LastCheckpoint(dir string) (string, int, error) {
checkpoints, err := listCheckpoints(dir)
if err != nil {
return "", 0, err
}
if len(checkpoints) == 0 {
return "", 0, record.ErrNotFound
}
checkpoint := checkpoints[len(checkpoints)-1]
return filepath.Join(dir, checkpoint.name), checkpoint.index, nil
}
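// A typical caller treats record.ErrNotFound as "no checkpoint yet" and
// replays the WAL from its first segment. The helper below is a minimal
// sketch of that pattern; it is illustrative only and not used elsewhere in
// this package.
func startSegmentAfterCheckpoint(dir string) (int, error) {
_, idx, err := LastCheckpoint(dir)
if errors.Is(err, record.ErrNotFound) {
return 0, nil // No checkpoint yet; start from the first WAL segment.
}
if err != nil {
return 0, fmt.Errorf("find last checkpoint: %w", err)
}
// Resume from the segment right after the last one covered by the checkpoint.
return idx + 1, nil
}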
// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
func DeleteCheckpoints(dir string, maxIndex int) error {
checkpoints, err := listCheckpoints(dir)
if err != nil {
return err
}
errs := tsdb_errors.NewMulti()
for _, checkpoint := range checkpoints {
if checkpoint.index >= maxIndex {
break
}
errs.Add(os.RemoveAll(filepath.Join(dir, checkpoint.name)))
}
return errs.Err()
}
// CheckpointPrefix is the prefix used for checkpoint files.
const CheckpointPrefix = "checkpoint."
// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
// All series not satisfying keep, all samples/tombstones/exemplars below mint,
// and all metadata that is not the latest are dropped.
//
// The checkpoint is stored in a directory named checkpoint.N in the same
// segmented format as the original WAL itself.
// This makes it easy to read it through the WAL package and concatenate
// it with the original WAL.
func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) {
stats := &CheckpointStats{}
var sgmReader io.ReadCloser
logger.Info("Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint)
{
var sgmRange []SegmentRange
dir, idx, err := LastCheckpoint(w.Dir())
if err != nil && !errors.Is(err, record.ErrNotFound) {
return nil, fmt.Errorf("find last checkpoint: %w", err)
}
last := idx + 1
if err == nil {
if from > last {
return nil, fmt.Errorf("unexpected gap to last checkpoint. expected:%v, requested:%v", last, from)
}
// Ignore WAL files below the checkpoint. They shouldn't exist to begin with.
from = last
sgmRange = append(sgmRange, SegmentRange{Dir: dir, Last: math.MaxInt32})
}
sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to})
sgmReader, err = NewSegmentsRangeReader(sgmRange...)
if err != nil {
return nil, fmt.Errorf("create segment reader: %w", err)
}
defer sgmReader.Close()
}
cpdir := checkpointDir(w.Dir(), to)
cpdirtmp := cpdir + ".tmp"
if err := os.RemoveAll(cpdirtmp); err != nil {
return nil, fmt.Errorf("remove previous temporary checkpoint dir: %w", err)
}
if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
return nil, fmt.Errorf("create checkpoint dir: %w", err)
}
cp, err := New(nil, nil, cpdirtmp, w.CompressionType())
if err != nil {
return nil, fmt.Errorf("open checkpoint: %w", err)
}
// Ensures that an early return caused by an error doesn't leave any tmp files.
defer func() {
cp.Close()
os.RemoveAll(cpdirtmp)
}()
r := NewReader(sgmReader)
var (
series []record.RefSeries
samples []record.RefSample
histogramSamples []record.RefHistogramSample
floatHistogramSamples []record.RefFloatHistogramSample
tstones []tombstones.Stone
exemplars []record.RefExemplar
metadata []record.RefMetadata
st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
dec = record.NewDecoder(st, logger)
enc record.Encoder
buf []byte
recs [][]byte
latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
)
for r.Next() {
series, samples, histogramSamples, floatHistogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], floatHistogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]
// We don't reset the buffer since we batch up multiple records
// before writing them to the checkpoint.
// Remember where the record for this iteration starts.
start := len(buf)
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
series, err = dec.Series(rec, series)
if err != nil {
return nil, fmt.Errorf("decode series: %w", err)
}
// Drop irrelevant series in place.
repl := series[:0]
for _, s := range series {
if keep(s.Ref) {
repl = append(repl, s)
}
}
if len(repl) > 0 {
buf = enc.Series(repl, buf)
}
stats.TotalSeries += len(series)
stats.DroppedSeries += len(series) - len(repl)
case record.Samples:
samples, err = dec.Samples(rec, samples)
if err != nil {
return nil, fmt.Errorf("decode samples: %w", err)
}
// Drop irrelevant samples in place.
repl := samples[:0]
for _, s := range samples {
if s.T >= mint {
repl = append(repl, s)
}
}
if len(repl) > 0 {
buf = enc.Samples(repl, buf)
}
stats.TotalSamples += len(samples)
stats.DroppedSamples += len(samples) - len(repl)
case record.HistogramSamples:
histogramSamples, err = dec.HistogramSamples(rec, histogramSamples)
if err != nil {
return nil, fmt.Errorf("decode histogram samples: %w", err)
}
// Drop irrelevant histogramSamples in place.
repl := histogramSamples[:0]
for _, h := range histogramSamples {
if h.T >= mint {
repl = append(repl, h)
}
}
if len(repl) > 0 {
buf, _ = enc.HistogramSamples(repl, buf)
}
stats.TotalSamples += len(histogramSamples)
stats.DroppedSamples += len(histogramSamples) - len(repl)
case record.CustomBucketsHistogramSamples:
histogramSamples, err = dec.HistogramSamples(rec, histogramSamples)
if err != nil {
return nil, fmt.Errorf("decode histogram samples: %w", err)
}
// Drop irrelevant histogramSamples in place.
repl := histogramSamples[:0]
for _, h := range histogramSamples {
if h.T >= mint {
repl = append(repl, h)
}
}
if len(repl) > 0 {
buf = enc.CustomBucketsHistogramSamples(repl, buf)
}
stats.TotalSamples += len(histogramSamples)
stats.DroppedSamples += len(histogramSamples) - len(repl)
case record.FloatHistogramSamples:
floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
if err != nil {
return nil, fmt.Errorf("decode float histogram samples: %w", err)
}
// Drop irrelevant floatHistogramSamples in place.
repl := floatHistogramSamples[:0]
for _, fh := range floatHistogramSamples {
if fh.T >= mint {
repl = append(repl, fh)
}
}
if len(repl) > 0 {
buf, _ = enc.FloatHistogramSamples(repl, buf)
}
stats.TotalSamples += len(floatHistogramSamples)
stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
case record.CustomBucketsFloatHistogramSamples:
floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
if err != nil {
return nil, fmt.Errorf("decode float histogram samples: %w", err)
}
// Drop irrelevant floatHistogramSamples in place.
repl := floatHistogramSamples[:0]
for _, fh := range floatHistogramSamples {
if fh.T >= mint {
repl = append(repl, fh)
}
}
if len(repl) > 0 {
buf = enc.CustomBucketsFloatHistogramSamples(repl, buf)
}
stats.TotalSamples += len(floatHistogramSamples)
stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
case record.Tombstones:
tstones, err = dec.Tombstones(rec, tstones)
if err != nil {
return nil, fmt.Errorf("decode deletes: %w", err)
}
// Drop irrelevant tombstones in place.
repl := tstones[:0]
for _, s := range tstones {
for _, iv := range s.Intervals {
if iv.Maxt >= mint {
repl = append(repl, s)
break
}
}
}
if len(repl) > 0 {
buf = enc.Tombstones(repl, buf)
}
stats.TotalTombstones += len(tstones)
stats.DroppedTombstones += len(tstones) - len(repl)
case record.Exemplars:
exemplars, err = dec.Exemplars(rec, exemplars)
if err != nil {
return nil, fmt.Errorf("decode exemplars: %w", err)
}
// Drop irrelevant exemplars in place.
repl := exemplars[:0]
for _, e := range exemplars {
if e.T >= mint {
repl = append(repl, e)
}
}
if len(repl) > 0 {
buf = enc.Exemplars(repl, buf)
}
stats.TotalExemplars += len(exemplars)
stats.DroppedExemplars += len(exemplars) - len(repl)
case record.Metadata:
metadata, err := dec.Metadata(rec, metadata)
if err != nil {
return nil, fmt.Errorf("decode metadata: %w", err)
}
// Only keep reference to the latest found metadata for each refID.
repl := 0
for _, m := range metadata {
if keep(m.Ref) {
if _, ok := latestMetadataMap[m.Ref]; !ok {
repl++
}
latestMetadataMap[m.Ref] = m
}
}
stats.TotalMetadata += len(metadata)
stats.DroppedMetadata += len(metadata) - repl
default:
// Unknown record type, probably from a future Prometheus version.
continue
}
if len(buf[start:]) == 0 {
continue // All contents discarded.
}
recs = append(recs, buf[start:])
// Flush records in 1 MB increments.
if len(buf) > 1*1024*1024 {
if err := cp.Log(recs...); err != nil {
return nil, fmt.Errorf("flush records: %w", err)
}
buf, recs = buf[:0], recs[:0]
}
}
// If we hit any corruption during checkpointing, repairing is not an option.
// The head won't know which series records are lost.
if r.Err() != nil {
return nil, fmt.Errorf("read segments: %w", r.Err())
}
// Flush remaining records.
if err := cp.Log(recs...); err != nil {
return nil, fmt.Errorf("flush records: %w", err)
}
// Flush latest metadata records for each series.
if len(latestMetadataMap) > 0 {
latestMetadata := make([]record.RefMetadata, 0, len(latestMetadataMap))
for _, m := range latestMetadataMap {
latestMetadata = append(latestMetadata, m)
}
if err := cp.Log(enc.Metadata(latestMetadata, buf[:0])); err != nil {
return nil, fmt.Errorf("flush metadata records: %w", err)
}
}
if err := cp.Close(); err != nil {
return nil, fmt.Errorf("close checkpoint: %w", err)
}
// Sync temporary directory before rename.
df, err := fileutil.OpenDir(cpdirtmp)
if err != nil {
return nil, fmt.Errorf("open temporary checkpoint directory: %w", err)
}
if err := df.Sync(); err != nil {
df.Close()
return nil, fmt.Errorf("sync temporary checkpoint directory: %w", err)
}
if err = df.Close(); err != nil {
return nil, fmt.Errorf("close temporary checkpoint directory: %w", err)
}
if err := fileutil.Replace(cpdirtmp, cpdir); err != nil {
return nil, fmt.Errorf("rename checkpoint directory: %w", err)
}
return stats, nil
}
func checkpointDir(dir string, i int) string {
return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i))
}
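// For illustration (hypothetical inputs): with CheckpointPrefix being
// "checkpoint." as the format comment above implies, checkpointDir("data/wal", 42)
// yields "data/wal/checkpoint.00000042" on Unix-like systems.
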
type checkpointRef struct {
name string
index int
}
func listCheckpoints(dir string) (refs []checkpointRef, err error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
for i := range files {
fi := files[i]
if !strings.HasPrefix(fi.Name(), CheckpointPrefix) {
continue
}
if !fi.IsDir() {
return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name())
}
idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):])
if err != nil {
continue
}
refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
}
slices.SortFunc(refs, func(a, b checkpointRef) int {
return a.index - b.index
})
return refs, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/watcher.go | tsdb/wlog/watcher.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"errors"
"fmt"
"io"
"log/slog"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/tsdb/record"
)
const (
checkpointPeriod = 5 * time.Second
segmentCheckPeriod = 100 * time.Millisecond
consumer = "consumer"
)
var (
ErrIgnorable = errors.New("ignore me")
readTimeout = 15 * time.Second
)
// WriteTo is an interface used by the Watcher to send the samples it's read
// from the WAL on to somewhere else. Functions will be called concurrently
// and it is left to the implementer to make sure they are safe.
type WriteTo interface {
	// Append and the other Append* methods should block until the samples are
	// fully accepted, whether enqueued in memory or successfully written to
	// their final destination.
	// Once returned, the WAL Watcher will not attempt to pass that data again.
Append([]record.RefSample) bool
AppendExemplars([]record.RefExemplar) bool
AppendHistograms([]record.RefHistogramSample) bool
AppendFloatHistograms([]record.RefFloatHistogramSample) bool
StoreSeries([]record.RefSeries, int)
StoreMetadata([]record.RefMetadata)
// UpdateSeriesSegment and SeriesReset are intended for
// garbage-collection:
// First we call UpdateSeriesSegment on all current series.
UpdateSeriesSegment([]record.RefSeries, int)
// Then SeriesReset is called to allow the deletion of all series
// created in a segment lower than the argument.
SeriesReset(int)
}
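// A minimal sketch of a WriteTo implementation, for illustration only; the
// type and names below are hypothetical and not part of this package. It
// accepts everything and only tracks the segment each series was last seen
// in, which is the minimum state needed for SeriesReset-based garbage
// collection (assumes the tsdb/chunks package is imported for
// chunks.HeadSeriesRef):
//
//	type sketchWriteTo struct{ seen map[chunks.HeadSeriesRef]int }
//
//	func (s *sketchWriteTo) Append([]record.RefSample) bool                              { return true }
//	func (s *sketchWriteTo) AppendExemplars([]record.RefExemplar) bool                   { return true }
//	func (s *sketchWriteTo) AppendHistograms([]record.RefHistogramSample) bool           { return true }
//	func (s *sketchWriteTo) AppendFloatHistograms([]record.RefFloatHistogramSample) bool { return true }
//	func (s *sketchWriteTo) StoreMetadata([]record.RefMetadata)                          {}
//
//	func (s *sketchWriteTo) StoreSeries(series []record.RefSeries, segment int) {
//		s.UpdateSeriesSegment(series, segment)
//	}
//
//	func (s *sketchWriteTo) UpdateSeriesSegment(series []record.RefSeries, segment int) {
//		for _, rs := range series {
//			s.seen[rs.Ref] = segment
//		}
//	}
//
//	func (s *sketchWriteTo) SeriesReset(segment int) {
//		for ref, seg := range s.seen {
//			if seg < segment {
//				delete(s.seen, ref) // Series not seen since before this segment.
//			}
//		}
//	}
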
// WriteNotified notifies the watcher that data has been written so that it can read.
type WriteNotified interface {
Notify()
}
type WatcherMetrics struct {
reg prometheus.Registerer
recordsRead *prometheus.CounterVec
recordDecodeFails *prometheus.CounterVec
samplesSentPreTailing *prometheus.CounterVec
currentSegment *prometheus.GaugeVec
notificationsSkipped *prometheus.CounterVec
}
// Watcher watches the TSDB WAL for a given WriteTo.
type Watcher struct {
name string
writer WriteTo
logger *slog.Logger
walDir string
lastCheckpoint string
sendExemplars bool
sendHistograms bool
sendMetadata bool
metrics *WatcherMetrics
readerMetrics *LiveReaderMetrics
startTime time.Time
startTimestamp int64 // the start time as a Prometheus timestamp
sendSamples bool
recordsReadMetric *prometheus.CounterVec
recordDecodeFailsMetric prometheus.Counter
samplesSentPreTailing prometheus.Counter
currentSegmentMetric prometheus.Gauge
notificationsSkipped prometheus.Counter
readNotify chan struct{}
quit chan struct{}
done chan struct{}
// For testing, stop when we hit this segment.
MaxSegment int
}
func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
m := &WatcherMetrics{
reg: reg,
recordsRead: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "records_read_total",
Help: "Number of records read by the WAL watcher from the WAL.",
},
[]string{consumer, "type"},
),
recordDecodeFails: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "record_decode_failures_total",
Help: "Number of records read by the WAL watcher that resulted in an error when decoding.",
},
[]string{consumer},
),
samplesSentPreTailing: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "samples_sent_pre_tailing_total",
Help: "Number of sample records read by the WAL watcher and sent to remote write during replay of existing WAL.",
},
[]string{consumer},
),
currentSegment: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "current_segment",
Help: "Current segment the WAL watcher is reading records from.",
},
[]string{consumer},
),
notificationsSkipped: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "notifications_skipped_total",
Help: "The number of WAL write notifications that the Watcher has skipped due to already being in a WAL read routine.",
},
[]string{consumer},
),
}
if reg != nil {
reg.MustRegister(m.recordsRead)
reg.MustRegister(m.recordDecodeFails)
reg.MustRegister(m.samplesSentPreTailing)
reg.MustRegister(m.currentSegment)
reg.MustRegister(m.notificationsSkipped)
}
return m
}
// Unregister unregisters metrics emitted by this instance.
func (m *WatcherMetrics) Unregister() {
if m.reg == nil {
return
}
m.reg.Unregister(m.recordsRead)
m.reg.Unregister(m.recordDecodeFails)
m.reg.Unregister(m.samplesSentPreTailing)
m.reg.Unregister(m.currentSegment)
m.reg.Unregister(m.notificationsSkipped)
}
// NewWatcher creates a new WAL watcher for a given WriteTo.
func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger *slog.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher {
if logger == nil {
logger = promslog.NewNopLogger()
}
return &Watcher{
logger: logger,
writer: writer,
metrics: metrics,
readerMetrics: readerMetrics,
walDir: filepath.Join(dir, "wal"),
name: name,
sendExemplars: sendExemplars,
sendHistograms: sendHistograms,
sendMetadata: sendMetadata,
readNotify: make(chan struct{}),
quit: make(chan struct{}),
done: make(chan struct{}),
MaxSegment: -1,
}
}
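// A minimal usage sketch (hypothetical wiring; writeTo and dataDir are
// assumed to exist, and logger may be nil to get a no-op logger):
//
//	metrics := NewWatcherMetrics(prometheus.DefaultRegisterer)
//	readerMetrics := NewLiveReaderMetrics(prometheus.DefaultRegisterer)
//	w := NewWatcher(metrics, readerMetrics, nil, "example-consumer", writeTo, dataDir, false, false, false)
//	w.Start()
//	defer w.Stop()
//
// Note that NewWatcher joins "wal" onto dir itself, so pass the TSDB data
// directory rather than the WAL directory.
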
func (w *Watcher) Notify() {
select {
case w.readNotify <- struct{}{}:
return
	default: // Default case so Notify never blocks the caller.
		// We don't need a buffered channel or any buffering since, for each
		// notification it receives, the watcher will read until EOF.
w.notificationsSkipped.Inc()
}
}
func (w *Watcher) SetMetrics() {
	// Set up the WAL Watcher's metrics. We do this here rather than in the
	// constructor because of the ordering of creating Queue Managers,
	// stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig.
if w.metrics != nil {
w.recordsReadMetric = w.metrics.recordsRead.MustCurryWith(prometheus.Labels{consumer: w.name})
w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name)
w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
w.notificationsSkipped = w.metrics.notificationsSkipped.WithLabelValues(w.name)
}
}
// Start the Watcher.
func (w *Watcher) Start() {
w.SetMetrics()
w.logger.Info("Starting WAL watcher", "queue", w.name)
go w.loop()
}
// Stop the Watcher.
func (w *Watcher) Stop() {
close(w.quit)
<-w.done
// Records read metric has series and samples.
if w.metrics != nil {
w.metrics.recordsRead.DeleteLabelValues(w.name, "series")
w.metrics.recordsRead.DeleteLabelValues(w.name, "samples")
w.metrics.recordDecodeFails.DeleteLabelValues(w.name)
w.metrics.samplesSentPreTailing.DeleteLabelValues(w.name)
w.metrics.currentSegment.DeleteLabelValues(w.name)
}
w.logger.Info("WAL watcher stopped", "queue", w.name)
}
func (w *Watcher) loop() {
defer close(w.done)
// We may encounter failures processing the WAL; we should wait and retry.
for !isClosed(w.quit) {
w.SetStartTime(time.Now())
if err := w.Run(); err != nil {
w.logger.Error("error tailing WAL", "err", err)
}
select {
case <-w.quit:
return
case <-time.After(5 * time.Second):
}
}
}
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
_, lastSegment, err := Segments(w.walDir)
if err != nil {
return fmt.Errorf("Segments: %w", err)
}
// We want to ensure this is false across iterations since
// Run will be called again if there was a failure to read the WAL.
w.sendSamples = false
w.logger.Info("Replaying WAL", "queue", w.name)
// Backfill from the checkpoint first if it exists.
lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir)
if err != nil && !errors.Is(err, record.ErrNotFound) {
return fmt.Errorf("tsdb.LastCheckpoint: %w", err)
}
if err == nil {
if err = w.readCheckpoint(lastCheckpoint, (*Watcher).readSegment); err != nil {
return fmt.Errorf("readCheckpoint: %w", err)
}
}
w.lastCheckpoint = lastCheckpoint
currentSegment, err := w.findSegmentForIndex(checkpointIndex)
if err != nil {
return err
}
w.logger.Debug("Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
for !isClosed(w.quit) {
w.currentSegmentMetric.Set(float64(currentSegment))
// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
w.logger.Debug("Processing segment", "currentSegment", currentSegment)
if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
return err
}
// For testing: stop when you hit a specific segment.
if currentSegment == w.MaxSegment {
return nil
}
currentSegment++
}
return nil
}
// findSegmentForIndex finds the first segment greater than or equal to index.
func (w *Watcher) findSegmentForIndex(index int) (int, error) {
refs, err := listSegments(w.walDir)
if err != nil {
return -1, err
}
for _, r := range refs {
if r.index >= index {
return r.index, nil
}
}
return -1, errors.New("failed to find segment for index")
}
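// For example (hypothetical layout): if the WAL directory holds segments
// 00000003 through 00000005 and the last checkpoint index is 1, this returns
// 3, so tailing resumes from the first segment at or after the checkpoint.
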
func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
err := w.readSegment(r, segmentNum, tail)
// Ignore all errors reading to end of segment whilst replaying the WAL.
if !tail {
if err != nil && !errors.Is(err, io.EOF) {
w.logger.Warn("Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
} else if r.Offset() != size {
w.logger.Warn("Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size)
}
return ErrIgnorable
}
// Otherwise, when we are tailing, non-EOFs are fatal.
if err != nil && !errors.Is(err, io.EOF) {
return err
}
return nil
}
// Use tail true to indicate that the reader is currently on a segment that is
// actively being written to. If false, assume it's a full segment and we're
// replaying it on start to cache the series records.
func (w *Watcher) watch(segmentNum int, tail bool) error {
segment, err := OpenReadSegment(SegmentName(w.walDir, segmentNum))
if err != nil {
return err
}
defer segment.Close()
reader := NewLiveReader(w.logger, w.readerMetrics, segment)
size := int64(math.MaxInt64)
if !tail {
var err error
size, err = getSegmentSize(w.walDir, segmentNum)
if err != nil {
return fmt.Errorf("getSegmentSize: %w", err)
}
return w.readAndHandleError(reader, segmentNum, tail, size)
}
checkpointTicker := time.NewTicker(checkpointPeriod)
defer checkpointTicker.Stop()
segmentTicker := time.NewTicker(segmentCheckPeriod)
defer segmentTicker.Stop()
readTicker := time.NewTicker(readTimeout)
defer readTicker.Stop()
gcSem := make(chan struct{}, 1)
for {
select {
case <-w.quit:
return nil
case <-checkpointTicker.C:
// Periodically check if there is a new checkpoint so we can garbage
// collect labels. As this is considered an optimisation, we ignore
// errors during checkpoint processing. Doing the process asynchronously
// allows the current WAL segment to be processed while reading the
// checkpoint.
select {
case gcSem <- struct{}{}:
go func() {
defer func() {
<-gcSem
}()
if err := w.garbageCollectSeries(segmentNum); err != nil {
w.logger.Warn("Error process checkpoint", "err", err)
}
}()
default:
// Currently doing a garbage collect, try again later.
}
// if a newer segment is produced, read the current one until the end and move on.
case <-segmentTicker.C:
_, last, err := Segments(w.walDir)
if err != nil {
return fmt.Errorf("Segments: %w", err)
}
if last > segmentNum {
return w.readAndHandleError(reader, segmentNum, tail, size)
}
continue
		// We haven't read due to a notification in quite some time, so try reading anyway.
case <-readTicker.C:
w.logger.Debug("Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout)
err := w.readAndHandleError(reader, segmentNum, tail, size)
if err != nil {
return err
}
// reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
case <-w.readNotify:
err := w.readAndHandleError(reader, segmentNum, tail, size)
if err != nil {
return err
}
// reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
}
}
}
func (w *Watcher) garbageCollectSeries(segmentNum int) error {
dir, _, err := LastCheckpoint(w.walDir)
if err != nil && !errors.Is(err, record.ErrNotFound) {
return fmt.Errorf("tsdb.LastCheckpoint: %w", err)
}
if dir == "" || dir == w.lastCheckpoint {
return nil
}
w.lastCheckpoint = dir
index, err := checkpointNum(dir)
if err != nil {
return fmt.Errorf("error parsing checkpoint filename: %w", err)
}
if index >= segmentNum {
w.logger.Debug("Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
return nil
}
w.logger.Debug("New checkpoint detected", "new", dir, "currentSegment", segmentNum)
if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil {
return fmt.Errorf("readCheckpoint: %w", err)
}
// Clear series with a checkpoint or segment index # lower than the checkpoint we just read.
w.writer.SeriesReset(index)
return nil
}
// Read from a segment and pass the details to w.writer.
// Also used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
var (
dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // One table per WAL segment means it won't grow indefinitely.
series []record.RefSeries
samples []record.RefSample
samplesToSend []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
histogramsToSend []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
floatHistogramsToSend []record.RefFloatHistogramSample
metadata []record.RefMetadata
)
for r.Next() && !isClosed(w.quit) {
var err error
rec := r.Record()
w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc()
switch dec.Type(rec) {
case record.Series:
series, err = dec.Series(rec, series[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
w.writer.StoreSeries(series, segmentNum)
case record.Samples:
			// If we're not tailing a segment we can ignore any sample records we see.
// This speeds up replay of the WAL by > 10x.
if !tail {
break
}
samples, err = dec.Samples(rec, samples[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
for _, s := range samples {
if s.T > w.startTimestamp {
if !w.sendSamples {
w.sendSamples = true
duration := time.Since(w.startTime)
w.logger.Info("Done replaying WAL", "duration", duration)
}
samplesToSend = append(samplesToSend, s)
}
}
if len(samplesToSend) > 0 {
w.writer.Append(samplesToSend)
samplesToSend = samplesToSend[:0]
}
case record.Exemplars:
// Skip if experimental "exemplars over remote write" is not enabled.
if !w.sendExemplars {
break
}
			// If we're not tailing a segment we can ignore any exemplar records we see.
// This speeds up replay of the WAL significantly.
if !tail {
break
}
exemplars, err = dec.Exemplars(rec, exemplars[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
w.writer.AppendExemplars(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
// Skip if "native histograms over remote write" is not enabled.
if !w.sendHistograms {
break
}
if !tail {
break
}
histograms, err = dec.HistogramSamples(rec, histograms[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
for _, h := range histograms {
if h.T > w.startTimestamp {
if !w.sendSamples {
w.sendSamples = true
duration := time.Since(w.startTime)
w.logger.Info("Done replaying WAL", "duration", duration)
}
histogramsToSend = append(histogramsToSend, h)
}
}
if len(histogramsToSend) > 0 {
w.writer.AppendHistograms(histogramsToSend)
histogramsToSend = histogramsToSend[:0]
}
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
// Skip if "native histograms over remote write" is not enabled.
if !w.sendHistograms {
break
}
if !tail {
break
}
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
for _, fh := range floatHistograms {
if fh.T > w.startTimestamp {
if !w.sendSamples {
w.sendSamples = true
duration := time.Since(w.startTime)
w.logger.Info("Done replaying WAL", "duration", duration)
}
floatHistogramsToSend = append(floatHistogramsToSend, fh)
}
}
if len(floatHistogramsToSend) > 0 {
w.writer.AppendFloatHistograms(floatHistogramsToSend)
floatHistogramsToSend = floatHistogramsToSend[:0]
}
case record.Metadata:
if !w.sendMetadata {
break
}
metadata, err = dec.Metadata(rec, metadata[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
w.writer.StoreMetadata(metadata)
case record.Unknown:
// Could be corruption, or reading from a WAL from a newer Prometheus.
w.recordDecodeFailsMetric.Inc()
default:
// We're not interested in other types of records.
}
}
if err := r.Err(); err != nil {
return fmt.Errorf("segment %d: %w", segmentNum, err)
}
return nil
}
// Go through all series in a segment updating the segmentNum, so we can delete older series.
// Used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error {
var (
dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // Needed for decoding; labels do not outlive this function.
series []record.RefSeries
)
for r.Next() && !isClosed(w.quit) {
rec := r.Record()
w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc()
switch dec.Type(rec) {
case record.Series:
series, err := dec.Series(rec, series[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
w.writer.UpdateSeriesSegment(series, segmentNum)
case record.Unknown:
// Could be corruption, or reading from a WAL from a newer Prometheus.
w.recordDecodeFailsMetric.Inc()
default:
// We're only interested in series.
}
}
if err := r.Err(); err != nil {
return fmt.Errorf("segment %d: %w", segmentNum, err)
}
return nil
}
func (w *Watcher) SetStartTime(t time.Time) {
w.startTime = t
w.startTimestamp = timestamp.FromTime(t)
}
type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) error
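// Method expressions satisfy segmentReadFn, which is how callers select the
// read behaviour: Run passes (*Watcher).readSegment, while
// garbageCollectSeries passes (*Watcher).readSegmentForGC.
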
// Read all the series records from a Checkpoint directory.
func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error {
w.logger.Debug("Reading checkpoint", "dir", checkpointDir)
index, err := checkpointNum(checkpointDir)
if err != nil {
return fmt.Errorf("checkpointNum: %w", err)
}
// Ensure we read the whole contents of every segment in the checkpoint dir.
segs, err := listSegments(checkpointDir)
if err != nil {
return fmt.Errorf("unable to get segments checkpoint dir: %w", err)
}
for _, segRef := range segs {
size, err := getSegmentSize(checkpointDir, segRef.index)
if err != nil {
return fmt.Errorf("getSegmentSize: %w", err)
}
sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index))
if err != nil {
return fmt.Errorf("unable to open segment: %w", err)
}
r := NewLiveReader(w.logger, w.readerMetrics, sr)
err = readFn(w, r, index, false)
sr.Close()
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("readSegment: %w", err)
}
if r.Offset() != size {
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset())
}
}
w.logger.Debug("Read series references from checkpoint", "checkpoint", checkpointDir)
return nil
}
func checkpointNum(dir string) (int, error) {
	// Checkpoint dir names are in the format checkpoint.00000001.
	// The full path may contain other dots (e.g. in hidden directories), so
	// only parse the base name.
chunks := strings.Split(filepath.Base(dir), ".")
if len(chunks) != 2 {
return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir)
}
result, err := strconv.Atoi(chunks[1])
if err != nil {
return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir)
}
return result, nil
}
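// For illustration (hypothetical path): checkpointNum("data/wal/checkpoint.00000042")
// returns 42, while a base name that does not split into exactly two
// dot-separated parts, or has a non-numeric suffix, returns an error.
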
// Get size of segment.
func getSegmentSize(dir string, index int) (int64, error) {
i := int64(-1)
fi, err := os.Stat(SegmentName(dir, index))
if err == nil {
i = fi.Size()
}
return i, err
}
func isClosed(c chan struct{}) bool {
select {
case <-c:
return true
default:
return false
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/wlog/live_reader.go | tsdb/wlog/live_reader.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wlog
import (
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"log/slog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/util/compression"
)
// LiveReaderMetrics holds all metrics exposed by the LiveReader.
type LiveReaderMetrics struct {
reg prometheus.Registerer
readerCorruptionErrors *prometheus.CounterVec
}
// NewLiveReaderMetrics instantiates, registers and returns metrics to be injected
// at LiveReader instantiation.
func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics {
m := &LiveReaderMetrics{
reg: reg,
readerCorruptionErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_reader_corruption_errors_total",
Help: "Errors encountered when reading the WAL.",
}, []string{"error"}),
}
if reg != nil {
reg.MustRegister(m.readerCorruptionErrors)
}
return m
}
// Unregister unregisters metrics emitted by this instance.
func (m *LiveReaderMetrics) Unregister() {
if m.reg == nil {
return
}
m.reg.Unregister(m.readerCorruptionErrors)
}
// NewLiveReader returns a new live reader.
func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader {
lr := &LiveReader{
logger: logger,
rdr: r,
decBuf: compression.NewSyncDecodeBuffer(),
metrics: metrics,
// Until we understand how they come about, make readers permissive
// to records spanning pages.
permissive: true,
}
return lr
}
// LiveReader reads WAL records from an io.Reader. It allows reading of WALs
// that are still in the process of being written, and returns records as soon
// as they can be read.
type LiveReader struct {
logger *slog.Logger
rdr io.Reader
err error
rec []byte
precomprBuf []byte
decBuf compression.DecodeBuffer
hdr [recordHeaderSize]byte
buf [pageSize]byte
readIndex int // Index in buf to start at for next read.
writeIndex int // Index in buf to start at for next write.
total int64 // Total bytes processed during reading in calls to Next().
index int // Used to track partial records, should be 0 at the start of every new record.
// For testing, we can treat EOF as a non-error.
eofNonErr bool
	// We sometimes see records span page boundaries. Should never happen, but it
	// does. Until we track down why, set permissive to true to tolerate it.
	// NB the non-live Reader implementation also allows for this.
permissive bool
metrics *LiveReaderMetrics
}
// Err returns any errors encountered reading the WAL. io.EOFs are not terminal
// and Next can be tried again. Non-EOFs are terminal, and the reader should
// not be used again. It is up to the user to decide when to stop trying should
// io.EOF be returned.
func (r *LiveReader) Err() error {
if r.eofNonErr && errors.Is(r.err, io.EOF) {
return nil
}
return r.err
}
// Offset returns the number of bytes consumed from this segment.
func (r *LiveReader) Offset() int64 {
return r.total
}
func (r *LiveReader) fillBuffer() (int, error) {
n, err := r.rdr.Read(r.buf[r.writeIndex:len(r.buf)])
r.writeIndex += n
return n, err
}
// Next returns true if Record() will contain a full record.
// If Next returns false, you should always check the contents of Err().
// A false return guarantees there are no more records if the segment is closed
// and not corrupt; otherwise, if Err() == io.EOF, you should try again when more
// data has been written.
func (r *LiveReader) Next() bool {
for {
		// If buildRecord returns a non-EOF error, it's game over - the segment is
		// corrupt. If buildRecord returns an EOF, we try to read more in
		// fillBuffer later on. If that fails to read anything (n=0 && err=EOF),
		// we return EOF and the user can try again later. If we have a full
		// page, buildRecord is guaranteed to return a record or a non-EOF; it
		// checks that records fit in pages.
switch ok, err := r.buildRecord(); {
case ok:
return true
case err != nil && !errors.Is(err, io.EOF):
r.err = err
return false
}
		// If we've filled the page and not found a record, this
		// means records have started to span pages. Shouldn't happen
		// but does, and until we find out why, we need to deal with this.
if r.permissive && r.writeIndex == pageSize && r.readIndex > 0 {
copy(r.buf[:], r.buf[r.readIndex:])
r.writeIndex -= r.readIndex
r.readIndex = 0
continue
}
if r.readIndex == pageSize {
r.writeIndex = 0
r.readIndex = 0
}
if r.writeIndex != pageSize {
n, err := r.fillBuffer()
if n == 0 || (err != nil && !errors.Is(err, io.EOF)) {
r.err = err
return false
}
}
}
}
// Record returns the current record.
// The returned byte slice is only valid until the next call to Next.
func (r *LiveReader) Record() []byte {
return r.rec
}
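// A minimal read-loop sketch (for illustration; segment, logger, metrics and
// process are assumed to exist). Because the returned slice is reused, data
// retained across iterations must be copied:
//
//	r := NewLiveReader(logger, metrics, segment)
//	for r.Next() {
//		rec := append([]byte(nil), r.Record()...) // Copy: invalidated by the next Next.
//		process(rec)
//	}
//	if err := r.Err(); err != nil && !errors.Is(err, io.EOF) {
//		// Terminal error: treat the segment as corrupt.
//	}
//	// On io.EOF, more data may arrive later; calling Next again is valid.
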
// Rebuild a full record from potentially partial records. Returns false
// if there was an error or if we weren't able to read a record for any reason.
// Returns true if we read a full record. Partial record data is accumulated in
// precomprBuf and decoded into rec once the record is complete.
func (r *LiveReader) buildRecord() (bool, error) {
for {
// Check that we have data in the internal buffer to read.
if r.writeIndex <= r.readIndex {
return false, nil
}
// Attempt to read a record, partial or otherwise.
temp, n, err := r.readRecord()
if err != nil {
return false, err
}
r.readIndex += n
r.total += int64(n)
if temp == nil {
return false, nil
}
rt := recTypeFromHeader(r.hdr[0])
if rt == recFirst || rt == recFull {
r.precomprBuf = r.precomprBuf[:0]
}
		// The record header has only 2 compression bits, so it's one of these 3 options.
// https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/wal.md#records-encoding
compr := compression.None
if r.hdr[0]&snappyMask == snappyMask {
compr = compression.Snappy
} else if r.hdr[0]&zstdMask == zstdMask {
compr = compression.Zstd
}
r.precomprBuf = append(r.precomprBuf, temp...)
if err := validateRecord(rt, r.index); err != nil {
r.index = 0
return false, err
}
if rt == recLast || rt == recFull {
r.index = 0
r.rec, err = compression.Decode(compr, r.precomprBuf, r.decBuf)
if err != nil {
return false, err
}
return true, nil
}
		// Only increment r.index for partial (first/middle) records, since we
		// use it to determine valid content record sequences.
r.index++
}
}
// Returns an error if the recType and i indicate an invalid record sequence.
// As an example, if i is > 0 because we've read some amount of a partial record
// (recFirst, recMiddle, etc. but not recLast) and then we get another recFirst or recFull
// instead of a recLast or recMiddle we would have an invalid record.
func validateRecord(typ recType, i int) error {
switch typ {
case recFull:
if i != 0 {
return errors.New("unexpected full record")
}
return nil
case recFirst:
if i != 0 {
return errors.New("unexpected first record, dropping buffer")
}
return nil
case recMiddle:
if i == 0 {
return errors.New("unexpected middle record, dropping buffer")
}
return nil
case recLast:
if i == 0 {
return errors.New("unexpected last record, dropping buffer")
}
return nil
default:
return fmt.Errorf("unexpected record type %d", typ)
}
}
// Read a sub-record (see recType) from the buffer. It could potentially
// be a full record (recFull) if the record fits within the bounds of a single page.
// Returns a byte slice of the record data read, the number of bytes read, and an error
// if there's a non-zero byte in a page term record or the record checksum fails.
// The caller is responsible for advancing the read index by the returned byte count.
func (r *LiveReader) readRecord() ([]byte, int, error) {
	// Special case: for recPageTerm, check that all remaining bytes to the end
	// of the page are zero; consume them but don't return them.
if r.buf[r.readIndex] == byte(recPageTerm) {
// End of page won't necessarily be end of buffer, as we may have
// got misaligned by records spanning page boundaries.
// r.total % pageSize is the offset into the current page
// that r.readIndex points to in buf. Therefore
// pageSize - (r.total % pageSize) is the amount left to read of
// the current page.
remaining := int(pageSize - (r.total % pageSize))
if r.readIndex+remaining > r.writeIndex {
return nil, 0, io.EOF
}
for i := r.readIndex; i < r.readIndex+remaining; i++ {
if r.buf[i] != 0 {
return nil, 0, errors.New("unexpected non-zero byte in page term bytes")
}
}
return nil, remaining, nil
}
// Not a recPageTerm; read the record and check the checksum.
if r.writeIndex-r.readIndex < recordHeaderSize {
return nil, 0, io.EOF
}
copy(r.hdr[:], r.buf[r.readIndex:r.readIndex+recordHeaderSize])
length := int(binary.BigEndian.Uint16(r.hdr[1:]))
crc := binary.BigEndian.Uint32(r.hdr[3:])
if r.readIndex+recordHeaderSize+length > pageSize {
if !r.permissive {
return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize)
}
r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc()
r.logger.Warn("Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize)
}
if recordHeaderSize+length > pageSize {
return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize)
}
if r.readIndex+recordHeaderSize+length > r.writeIndex {
return nil, 0, io.EOF
}
rec := r.buf[r.readIndex+recordHeaderSize : r.readIndex+recordHeaderSize+length]
if c := crc32.Checksum(rec, castagnoliTable); c != crc {
return nil, 0, fmt.Errorf("unexpected checksum %x, expected %x", c, crc)
}
return rec, length + recordHeaderSize, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/record/record_test.go | tsdb/record/record_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package record
import (
"bytes"
"fmt"
"math/rand"
"testing"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/testutil"
)
func TestRecord_EncodeDecode(t *testing.T) {
var enc Encoder
dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
series := []RefSeries{
{
Ref: 100,
Labels: labels.FromStrings("abc", "def", "123", "456"),
}, {
Ref: 1,
Labels: labels.FromStrings("abc", "def2", "1234", "4567"),
}, {
Ref: 435245,
Labels: labels.FromStrings("xyz", "def", "foo", "bar"),
},
}
decSeries, err := dec.Series(enc.Series(series, nil), nil)
require.NoError(t, err)
testutil.RequireEqual(t, series, decSeries)
metadata := []RefMetadata{
{
Ref: 100,
Type: uint8(Counter),
Unit: "",
Help: "some magic counter",
},
{
Ref: 1,
Type: uint8(Counter),
Unit: "seconds",
Help: "CPU time counter",
},
{
Ref: 147741,
Type: uint8(Gauge),
Unit: "percentage",
Help: "current memory usage",
},
}
decMetadata, err := dec.Metadata(enc.Metadata(metadata, nil), nil)
require.NoError(t, err)
require.Equal(t, metadata, decMetadata)
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
{Ref: 123, T: -1231, V: -123},
{Ref: 2, T: 0, V: 99999},
}
decSamples, err := dec.Samples(enc.Samples(samples, nil), nil)
require.NoError(t, err)
require.Equal(t, samples, decSamples)
// Intervals get split up into single entries. So we don't get back exactly
// what we put in.
tstones := []tombstones.Stone{
{Ref: 123, Intervals: tombstones.Intervals{
{Mint: -1000, Maxt: 1231231},
{Mint: 5000, Maxt: 0},
}},
{Ref: 13, Intervals: tombstones.Intervals{
{Mint: -1000, Maxt: -11},
{Mint: 5000, Maxt: 1000},
}},
}
decTstones, err := dec.Tombstones(enc.Tombstones(tstones, nil), nil)
require.NoError(t, err)
require.Equal(t, []tombstones.Stone{
{Ref: 123, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: 1231231}}},
{Ref: 123, Intervals: tombstones.Intervals{{Mint: 5000, Maxt: 0}}},
{Ref: 13, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: -11}}},
{Ref: 13, Intervals: tombstones.Intervals{{Mint: 5000, Maxt: 1000}}},
}, decTstones)
exemplars := []RefExemplar{
{Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("trace_id", "qwerty")},
{Ref: 123, T: -1231, V: -123, Labels: labels.FromStrings("trace_id", "asdf")},
{Ref: 2, T: 0, V: 99999, Labels: labels.FromStrings("trace_id", "zxcv")},
}
decExemplars, err := dec.Exemplars(enc.Exemplars(exemplars, nil), nil)
require.NoError(t, err)
testutil.RequireEqual(t, exemplars, decExemplars)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
{
Ref: 42,
T: 5678,
H: &histogram.Histogram{
Count: 11,
ZeroCount: 4,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 2, -1},
},
},
{
Ref: 67,
T: 5678,
H: &histogram.Histogram{
Count: 8,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{2, -1, 2, 0},
CustomValues: []float64{0, 2, 4, 6, 8},
},
},
}
histSamples, customBucketsHistograms := enc.HistogramSamples(histograms, nil)
customBucketsHistSamples := enc.CustomBucketsHistogramSamples(customBucketsHistograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
decCustomBucketsHistograms, err := dec.HistogramSamples(customBucketsHistSamples, nil)
require.NoError(t, err)
decHistograms = append(decHistograms, decCustomBucketsHistograms...)
require.Equal(t, histograms, decHistograms)
floatHistograms := make([]RefFloatHistogramSample, len(histograms))
for i, h := range histograms {
floatHistograms[i] = RefFloatHistogramSample{
Ref: h.Ref,
T: h.T,
FH: h.H.ToFloat(nil),
}
}
floatHistSamples, customBucketsFloatHistograms := enc.FloatHistogramSamples(floatHistograms, nil)
customBucketsFloatHistSamples := enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, nil)
decFloatHistograms, err := dec.FloatHistogramSamples(floatHistSamples, nil)
require.NoError(t, err)
decCustomBucketsFloatHistograms, err := dec.FloatHistogramSamples(customBucketsFloatHistSamples, nil)
require.NoError(t, err)
decFloatHistograms = append(decFloatHistograms, decCustomBucketsFloatHistograms...)
require.Equal(t, floatHistograms, decFloatHistograms)
// Gauge integer histograms.
for i := range histograms {
histograms[i].H.CounterResetHint = histogram.GaugeType
}
gaugeHistSamples, customBucketsGaugeHistograms := enc.HistogramSamples(histograms, nil)
customBucketsGaugeHistSamples := enc.CustomBucketsHistogramSamples(customBucketsGaugeHistograms, nil)
decGaugeHistograms, err := dec.HistogramSamples(gaugeHistSamples, nil)
require.NoError(t, err)
decCustomBucketsGaugeHistograms, err := dec.HistogramSamples(customBucketsGaugeHistSamples, nil)
require.NoError(t, err)
decGaugeHistograms = append(decGaugeHistograms, decCustomBucketsGaugeHistograms...)
require.Equal(t, histograms, decGaugeHistograms)
// Gauge float histograms.
for i := range floatHistograms {
floatHistograms[i].FH.CounterResetHint = histogram.GaugeType
}
gaugeFloatHistSamples, customBucketsGaugeFloatHistograms := enc.FloatHistogramSamples(floatHistograms, nil)
customBucketsGaugeFloatHistSamples := enc.CustomBucketsFloatHistogramSamples(customBucketsGaugeFloatHistograms, nil)
decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeFloatHistSamples, nil)
require.NoError(t, err)
decCustomBucketsGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketsGaugeFloatHistSamples, nil)
require.NoError(t, err)
decGaugeFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketsGaugeFloatHistograms...)
require.Equal(t, floatHistograms, decGaugeFloatHistograms)
}
func TestRecord_DecodeInvalidHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}
func TestRecord_DecodeInvalidFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}
func TestRecord_DecodeTooHighResolutionHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].H.Schema)
})
}
}
func TestRecord_DecodeTooHighResolutionFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].FH.Schema)
})
}
}
// TestRecord_Corrupted ensures that corrupted records return the correct error.
// Bugfix check for pull/521 and pull/523.
func TestRecord_Corrupted(t *testing.T) {
var enc Encoder
dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
t.Run("Test corrupted series record", func(t *testing.T) {
series := []RefSeries{
{
Ref: 100,
Labels: labels.FromStrings("abc", "def", "123", "456"),
},
}
corrupted := enc.Series(series, nil)[:8]
_, err := dec.Series(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted sample record", func(t *testing.T) {
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
}
corrupted := enc.Samples(samples, nil)[:8]
_, err := dec.Samples(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted tombstone record", func(t *testing.T) {
tstones := []tombstones.Stone{
{Ref: 123, Intervals: tombstones.Intervals{
{Mint: -1000, Maxt: 1231231},
{Mint: 5000, Maxt: 0},
}},
}
corrupted := enc.Tombstones(tstones, nil)[:8]
_, err := dec.Tombstones(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted exemplar record", func(t *testing.T) {
exemplars := []RefExemplar{
{Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("trace_id", "asdf")},
}
corrupted := enc.Exemplars(exemplars, nil)[:8]
_, err := dec.Exemplars(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted metadata record", func(t *testing.T) {
meta := []RefMetadata{
{Ref: 147, Type: uint8(Counter), Unit: "unit", Help: "help"},
}
corrupted := enc.Metadata(meta, nil)[:8]
_, err := dec.Metadata(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted histogram record", func(t *testing.T) {
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
{
Ref: 67,
T: 5678,
H: &histogram.Histogram{
Count: 8,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{2, -1, 2, 0},
CustomValues: []float64{0, 2, 4, 6, 8},
},
},
}
corruptedHists, customBucketsHists := enc.HistogramSamples(histograms, nil)
corruptedHists = corruptedHists[:8]
corruptedCustomBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHists, nil)
corruptedCustomBucketsHists = corruptedCustomBucketsHists[:8]
_, err := dec.HistogramSamples(corruptedHists, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
_, err = dec.HistogramSamples(corruptedCustomBucketsHists, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
}
func TestRecord_Type(t *testing.T) {
var enc Encoder
var dec Decoder
series := []RefSeries{{Ref: 100, Labels: labels.FromStrings("abc", "123")}}
recordType := dec.Type(enc.Series(series, nil))
require.Equal(t, Series, recordType)
samples := []RefSample{{Ref: 123, T: 12345, V: 1.2345}}
recordType = dec.Type(enc.Samples(samples, nil))
require.Equal(t, Samples, recordType)
tstones := []tombstones.Stone{{Ref: 1, Intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}}}
recordType = dec.Type(enc.Tombstones(tstones, nil))
require.Equal(t, Tombstones, recordType)
metadata := []RefMetadata{{Ref: 147, Type: uint8(Counter), Unit: "unit", Help: "help"}}
recordType = dec.Type(enc.Metadata(metadata, nil))
require.Equal(t, Metadata, recordType)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
{
Ref: 67,
T: 5678,
H: &histogram.Histogram{
Count: 8,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{2, -1, 2, 0},
CustomValues: []float64{0, 2, 4, 6, 8},
},
},
}
hists, customBucketsHistograms := enc.HistogramSamples(histograms, nil)
recordType = dec.Type(hists)
require.Equal(t, HistogramSamples, recordType)
customBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHistograms, nil)
recordType = dec.Type(customBucketsHists)
require.Equal(t, CustomBucketsHistogramSamples, recordType)
recordType = dec.Type(nil)
require.Equal(t, Unknown, recordType)
recordType = dec.Type([]byte{0})
require.Equal(t, Unknown, recordType)
}
func TestRecord_MetadataDecodeUnknownExtraFields(t *testing.T) {
var enc encoding.Encbuf
var dec Decoder
// Write record type.
enc.PutByte(byte(Metadata))
// Write first metadata entry, all known fields.
enc.PutUvarint64(101)
enc.PutByte(byte(Counter))
enc.PutUvarint(2)
enc.PutUvarintStr(unitMetaName)
enc.PutUvarintStr("")
enc.PutUvarintStr(helpMetaName)
enc.PutUvarintStr("some magic counter")
// Write second metadata entry, known fields + unknown fields.
enc.PutUvarint64(99)
enc.PutByte(byte(Counter))
enc.PutUvarint(3)
// Known fields.
enc.PutUvarintStr(unitMetaName)
enc.PutUvarintStr("seconds")
enc.PutUvarintStr(helpMetaName)
enc.PutUvarintStr("CPU time counter")
// Unknown fields.
enc.PutUvarintStr("an extra field name to be skipped")
enc.PutUvarintStr("with its value")
// Write third metadata entry, with unknown fields and different order.
enc.PutUvarint64(47250)
enc.PutByte(byte(Gauge))
enc.PutUvarint(4)
enc.PutUvarintStr("extra name one")
enc.PutUvarintStr("extra value one")
enc.PutUvarintStr(helpMetaName)
enc.PutUvarintStr("current memory usage")
enc.PutUvarintStr("extra name two")
enc.PutUvarintStr("extra value two")
enc.PutUvarintStr(unitMetaName)
enc.PutUvarintStr("percentage")
// Should yield known fields for all entries and skip over unknown fields.
expectedMetadata := []RefMetadata{
{
Ref: 101,
Type: uint8(Counter),
Unit: "",
Help: "some magic counter",
}, {
Ref: 99,
Type: uint8(Counter),
Unit: "seconds",
Help: "CPU time counter",
}, {
Ref: 47250,
Type: uint8(Gauge),
Unit: "percentage",
Help: "current memory usage",
},
}
decMetadata, err := dec.Metadata(enc.Get(), nil)
require.NoError(t, err)
require.Equal(t, expectedMetadata, decMetadata)
}
type refsCreateFn func(labelCount, histograms, buckets int) ([]RefSeries, []RefSample, []RefHistogramSample)
type recordsMaker struct {
name string
make refsCreateFn
}
// BenchmarkWAL_HistogramEncoding measures efficiency of encoding classic
// histograms and native histograms with custom buckets (NHCB).
func BenchmarkWAL_HistogramEncoding(b *testing.B) {
initClassicRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) {
ref := chunks.HeadSeriesRef(0)
lbls := map[string]string{}
for i := range labelCount {
lbls[fmt.Sprintf("l%d", i)] = fmt.Sprintf("v%d", i)
}
for i := range histograms {
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_count", i)
series = append(series, RefSeries{
Ref: ref,
Labels: labels.FromMap(lbls),
})
floatSamples = append(floatSamples, RefSample{
Ref: ref,
T: 100,
V: float64(i),
})
ref++
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_sum", i)
series = append(series, RefSeries{
Ref: ref,
Labels: labels.FromMap(lbls),
})
floatSamples = append(floatSamples, RefSample{
Ref: ref,
T: 100,
V: float64(i),
})
ref++
if buckets == 0 {
continue
}
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_bucket", i)
for j := range buckets {
lbls[model.BucketLabel] = fmt.Sprintf("%d.0", j)
series = append(series, RefSeries{
Ref: ref,
Labels: labels.FromMap(lbls),
})
floatSamples = append(floatSamples, RefSample{
Ref: ref,
T: 100,
V: float64(i + j),
})
ref++
}
delete(lbls, model.BucketLabel)
}
return series, floatSamples, histSamples
}
initNHCBRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) {
ref := chunks.HeadSeriesRef(0)
lbls := map[string]string{}
for i := range labelCount {
lbls[fmt.Sprintf("l%d", i)] = fmt.Sprintf("v%d", i)
}
for i := range histograms {
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d", i)
series = append(series, RefSeries{
Ref: ref,
Labels: labels.FromMap(lbls),
})
h := &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: uint64(i),
Sum: float64(i),
PositiveSpans: []histogram.Span{{Length: uint32(buckets)}},
PositiveBuckets: make([]int64, buckets+1),
CustomValues: make([]float64, buckets),
}
for j := range buckets {
h.PositiveBuckets[j] = int64(i + j)
}
histSamples = append(histSamples, RefHistogramSample{
Ref: ref,
T: 100,
H: h,
})
ref++
}
return series, floatSamples, histSamples
}
for _, maker := range []recordsMaker{
{
name: "classic",
make: initClassicRefs,
},
{
name: "nhcb",
make: initNHCBRefs,
},
} {
for _, labelCount := range []int{0, 10, 50} {
for _, histograms := range []int{10, 100, 1000} {
for _, buckets := range []int{0, 1, 10, 100} {
b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) {
series, samples, nhcbs := maker.make(labelCount, histograms, buckets)
enc := Encoder{}
						for b.Loop() {
							var buf []byte
							// Capture the returned slices: the encoders append to
							// buf, so dropping the results would leave buf empty
							// and report recordBytes/ops as zero.
							buf = enc.Series(series, buf)
							buf = enc.Samples(samples, buf)
							var leftOver []RefHistogramSample
							buf, leftOver = enc.HistogramSamples(nhcbs, buf)
							if len(leftOver) > 0 {
								buf = enc.CustomBucketsHistogramSamples(leftOver, buf)
							}
							b.ReportMetric(float64(len(buf)), "recordBytes/ops")
						}
})
}
}
}
}
}
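// The benchmark can be run on its own with the standard go test flags, e.g.:
//
//	go test -run '^$' -bench BenchmarkWAL_HistogramEncoding ./tsdb/record/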
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/record/record.go | tsdb/record/record.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package record contains the record types used for encoding Head block data
// in the WAL and the in-memory snapshot.
package record
import (
"errors"
"fmt"
"log/slog"
"math"
"unsafe"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
// Type represents the data type of a record.
type Type uint8
const (
// Unknown is returned for unrecognised WAL record types.
Unknown Type = 255
// Series is used to match WAL records of type Series.
Series Type = 1
// Samples is used to match WAL records of type Samples.
Samples Type = 2
// Tombstones is used to match WAL records of type Tombstones.
Tombstones Type = 3
// Exemplars is used to match WAL records of type Exemplars.
Exemplars Type = 4
// MmapMarkers is used to match OOO WBL records of type MmapMarkers.
MmapMarkers Type = 5
// Metadata is used to match WAL records of type Metadata.
Metadata Type = 6
// HistogramSamples is used to match WAL records of type Histograms.
HistogramSamples Type = 7
// FloatHistogramSamples is used to match WAL records of type Float Histograms.
FloatHistogramSamples Type = 8
// CustomBucketsHistogramSamples is used to match WAL records of type Histogram with custom buckets.
CustomBucketsHistogramSamples Type = 9
// CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets.
CustomBucketsFloatHistogramSamples Type = 10
)
func (rt Type) String() string {
switch rt {
case Series:
return "series"
case Samples:
return "samples"
case Tombstones:
return "tombstones"
case Exemplars:
return "exemplars"
case HistogramSamples:
return "histogram_samples"
case FloatHistogramSamples:
return "float_histogram_samples"
case CustomBucketsHistogramSamples:
return "custom_buckets_histogram_samples"
case CustomBucketsFloatHistogramSamples:
return "custom_buckets_float_histogram_samples"
case MmapMarkers:
return "mmapmarkers"
case Metadata:
return "metadata"
default:
return "unknown"
}
}
// MetricType represents the type of a series.
type MetricType uint8
const (
UnknownMT MetricType = 0
Counter MetricType = 1
Gauge MetricType = 2
HistogramSample MetricType = 3
GaugeHistogram MetricType = 4
Summary MetricType = 5
Info MetricType = 6
Stateset MetricType = 7
)
func GetMetricType(t model.MetricType) uint8 {
switch t {
case model.MetricTypeCounter:
return uint8(Counter)
case model.MetricTypeGauge:
return uint8(Gauge)
case model.MetricTypeHistogram:
return uint8(HistogramSample)
case model.MetricTypeGaugeHistogram:
return uint8(GaugeHistogram)
case model.MetricTypeSummary:
return uint8(Summary)
case model.MetricTypeInfo:
return uint8(Info)
case model.MetricTypeStateset:
return uint8(Stateset)
default:
return uint8(UnknownMT)
}
}
func ToMetricType(m uint8) model.MetricType {
switch m {
case uint8(Counter):
return model.MetricTypeCounter
case uint8(Gauge):
return model.MetricTypeGauge
case uint8(HistogramSample):
return model.MetricTypeHistogram
case uint8(GaugeHistogram):
return model.MetricTypeGaugeHistogram
case uint8(Summary):
return model.MetricTypeSummary
case uint8(Info):
return model.MetricTypeInfo
case uint8(Stateset):
return model.MetricTypeStateset
default:
return model.MetricTypeUnknown
}
}
const (
unitMetaName = "UNIT"
helpMetaName = "HELP"
)
// ErrNotFound is returned if a looked up resource was not found. It duplicates ErrNotFound from head.go.
var ErrNotFound = errors.New("not found")
// RefSeries is the series labels with the series ID.
type RefSeries struct {
Ref chunks.HeadSeriesRef
Labels labels.Labels
}
// RefSample is a timestamp/value pair associated with a reference to a series.
// TODO(beorn7): Perhaps make this "polymorphic", including histogram and float-histogram pointers? Then get rid of RefHistogramSample.
type RefSample struct {
Ref chunks.HeadSeriesRef
T int64
V float64
}
// RefMetadata is the metadata associated with a series ID.
type RefMetadata struct {
Ref chunks.HeadSeriesRef
Type uint8
Unit string
Help string
}
// RefExemplar is an exemplar with the labels, timestamp, value the exemplar was collected/observed with, and a reference to a series.
type RefExemplar struct {
Ref chunks.HeadSeriesRef
T int64
V float64
Labels labels.Labels
}
// RefHistogramSample is a histogram.
type RefHistogramSample struct {
Ref chunks.HeadSeriesRef
T int64
H *histogram.Histogram
}
// RefFloatHistogramSample is a float histogram.
type RefFloatHistogramSample struct {
Ref chunks.HeadSeriesRef
T int64
FH *histogram.FloatHistogram
}
// RefMmapMarker marks that all the samples of the given series until now have been m-mapped to disk.
type RefMmapMarker struct {
Ref chunks.HeadSeriesRef
MmapRef chunks.ChunkDiskMapperRef
}
// Decoder decodes series, sample, metadata and tombstone records.
type Decoder struct {
builder labels.ScratchBuilder
logger *slog.Logger
}
func NewDecoder(_ *labels.SymbolTable, logger *slog.Logger) Decoder { // FIXME: remove the unused SymbolTable parameter (or use a scratch builder with symbols).
b := labels.NewScratchBuilder(0)
b.SetUnsafeAdd(true)
return Decoder{builder: b, logger: logger}
}
// Type returns the type of the record.
// It returns Unknown if no valid record type is found.
func (*Decoder) Type(rec []byte) Type {
if len(rec) < 1 {
return Unknown
}
switch t := Type(rec[0]); t {
case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples:
return t
}
return Unknown
}
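// exampleDispatch is an illustrative sketch, not part of the upstream file:
// it shows how a consumer typically pairs Type with the matching decode
// method when replaying records, skipping record types it does not handle.
func exampleDispatch(d *Decoder, rec []byte) error {
switch d.Type(rec) {
case Series:
_, err := d.Series(rec, nil)
return err
case Samples:
_, err := d.Samples(rec, nil)
return err
case Tombstones:
_, err := d.Tombstones(rec, nil)
return err
default:
// Unknown or unhandled record types are skipped here.
return nil
}
}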
// Series appends series in rec to the given slice.
func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Series {
return nil, errors.New("invalid record type")
}
for len(dec.B) > 0 && dec.Err() == nil {
ref := storage.SeriesRef(dec.Be64())
lset := d.DecodeLabels(&dec)
series = append(series, RefSeries{
Ref: chunks.HeadSeriesRef(ref),
Labels: lset,
})
}
if dec.Err() != nil {
return nil, dec.Err()
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return series, nil
}
// Metadata appends metadata in rec to the given slice.
func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Metadata {
return nil, errors.New("invalid record type")
}
for len(dec.B) > 0 && dec.Err() == nil {
ref := dec.Uvarint64()
typ := dec.Byte()
numFields := dec.Uvarint()
// We're currently aware of two more metadata fields other than TYPE, namely UNIT and HELP.
// We can skip the rest of the fields (if we encounter any), but we must decode them anyway
// so we can correctly align with the start of the next metadata record.
var unit, help string
for range numFields {
fieldName := dec.UvarintStr()
fieldValue := dec.UvarintStr()
switch fieldName {
case unitMetaName:
unit = fieldValue
case helpMetaName:
help = fieldValue
}
}
metadata = append(metadata, RefMetadata{
Ref: chunks.HeadSeriesRef(ref),
Type: typ,
Unit: unit,
Help: help,
})
}
if dec.Err() != nil {
return nil, dec.Err()
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return metadata, nil
}
func yoloString(b []byte) string {
return unsafe.String(unsafe.SliceData(b), len(b))
}
// DecodeLabels decodes one set of labels from buf.
func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels {
d.builder.Reset()
nLabels := dec.Uvarint()
for range nLabels {
lName := dec.UvarintBytes()
lValue := dec.UvarintBytes()
d.builder.Add(yoloString(lName), yoloString(lValue))
}
return d.builder.Labels()
}
// Samples appends samples in rec to the given slice.
func (*Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Samples {
return nil, errors.New("invalid record type")
}
if dec.Len() == 0 {
return samples, nil
}
var (
baseRef = dec.Be64()
baseTime = dec.Be64int64()
)
// Allow 1 byte for each varint and 8 for the value; the output slice must be at least that big.
if minSize := dec.Len() / (1 + 1 + 8); cap(samples) < minSize {
samples = make([]RefSample, 0, minSize)
}
for len(dec.B) > 0 && dec.Err() == nil {
dref := dec.Varint64()
dtime := dec.Varint64()
val := dec.Be64()
samples = append(samples, RefSample{
Ref: chunks.HeadSeriesRef(int64(baseRef) + dref),
T: baseTime + dtime,
V: math.Float64frombits(val),
})
}
if dec.Err() != nil {
return nil, fmt.Errorf("decode error after %d samples: %w", len(samples), dec.Err())
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return samples, nil
}
// Tombstones appends tombstones in rec to the given slice.
func (*Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Tombstones {
return nil, errors.New("invalid record type")
}
for dec.Len() > 0 && dec.Err() == nil {
tstones = append(tstones, tombstones.Stone{
Ref: storage.SeriesRef(dec.Be64()),
Intervals: tombstones.Intervals{
{Mint: dec.Varint64(), Maxt: dec.Varint64()},
},
})
}
if dec.Err() != nil {
return nil, dec.Err()
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return tstones, nil
}
func (d *Decoder) Exemplars(rec []byte, exemplars []RefExemplar) ([]RefExemplar, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != Exemplars {
return nil, errors.New("invalid record type")
}
return d.ExemplarsFromBuffer(&dec, exemplars)
}
func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemplar) ([]RefExemplar, error) {
if dec.Len() == 0 {
return exemplars, nil
}
var (
baseRef = dec.Be64()
baseTime = dec.Be64int64()
)
for len(dec.B) > 0 && dec.Err() == nil {
dref := dec.Varint64()
dtime := dec.Varint64()
val := dec.Be64()
lset := d.DecodeLabels(dec)
exemplars = append(exemplars, RefExemplar{
Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
T: baseTime + dtime,
V: math.Float64frombits(val),
Labels: lset,
})
}
if dec.Err() != nil {
return nil, fmt.Errorf("decode error after %d exemplars: %w", len(exemplars), dec.Err())
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return exemplars, nil
}
func (*Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != MmapMarkers {
return nil, errors.New("invalid record type")
}
if dec.Len() == 0 {
return markers, nil
}
for len(dec.B) > 0 && dec.Err() == nil {
ref := chunks.HeadSeriesRef(dec.Be64())
mmapRef := chunks.ChunkDiskMapperRef(dec.Be64())
markers = append(markers, RefMmapMarker{
Ref: ref,
MmapRef: mmapRef,
})
}
if dec.Err() != nil {
return nil, fmt.Errorf("decode error after %d mmap markers: %w", len(markers), dec.Err())
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return markers, nil
}
func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != HistogramSamples && t != CustomBucketsHistogramSamples {
return nil, errors.New("invalid record type")
}
if dec.Len() == 0 {
return histograms, nil
}
var (
baseRef = dec.Be64()
baseTime = dec.Be64int64()
)
for len(dec.B) > 0 && dec.Err() == nil {
dref := dec.Varint64()
dtime := dec.Varint64()
rh := RefHistogramSample{
Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
T: baseTime + dtime,
H: &histogram.Histogram{},
}
DecodeHistogram(&dec, rh.H)
if !histogram.IsKnownSchema(rh.H.Schema) {
d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.H.Schema, "timestamp", rh.T)
continue
}
if rh.H.Schema > histogram.ExponentialSchemaMax && rh.H.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// record is from a newer Prometheus version that supports higher
// resolution.
if err := rh.H.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
return nil, fmt.Errorf("error reducing resolution of histogram #%d: %w", len(histograms)+1, err)
}
}
histograms = append(histograms, rh)
}
if dec.Err() != nil {
return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err())
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return histograms, nil
}
// DecodeHistogram decodes a Histogram from a byte slice.
func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) {
h.CounterResetHint = histogram.CounterResetHint(buf.Byte())
h.Schema = int32(buf.Varint64())
h.ZeroThreshold = math.Float64frombits(buf.Be64())
h.ZeroCount = buf.Uvarint64()
h.Count = buf.Uvarint64()
h.Sum = math.Float64frombits(buf.Be64())
l := buf.Uvarint()
if l > 0 {
h.PositiveSpans = make([]histogram.Span, l)
}
for i := range h.PositiveSpans {
h.PositiveSpans[i].Offset = int32(buf.Varint64())
h.PositiveSpans[i].Length = buf.Uvarint32()
}
l = buf.Uvarint()
if l > 0 {
h.NegativeSpans = make([]histogram.Span, l)
}
for i := range h.NegativeSpans {
h.NegativeSpans[i].Offset = int32(buf.Varint64())
h.NegativeSpans[i].Length = buf.Uvarint32()
}
l = buf.Uvarint()
if l > 0 {
h.PositiveBuckets = make([]int64, l)
}
for i := range h.PositiveBuckets {
h.PositiveBuckets[i] = buf.Varint64()
}
l = buf.Uvarint()
if l > 0 {
h.NegativeBuckets = make([]int64, l)
}
for i := range h.NegativeBuckets {
h.NegativeBuckets[i] = buf.Varint64()
}
if histogram.IsCustomBucketsSchema(h.Schema) {
l = buf.Uvarint()
if l > 0 {
h.CustomValues = make([]float64, l)
}
for i := range h.CustomValues {
h.CustomValues[i] = buf.Be64Float64()
}
}
}
func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples {
return nil, errors.New("invalid record type")
}
if dec.Len() == 0 {
return histograms, nil
}
var (
baseRef = dec.Be64()
baseTime = dec.Be64int64()
)
for len(dec.B) > 0 && dec.Err() == nil {
dref := dec.Varint64()
dtime := dec.Varint64()
rh := RefFloatHistogramSample{
Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
T: baseTime + dtime,
FH: &histogram.FloatHistogram{},
}
DecodeFloatHistogram(&dec, rh.FH)
if !histogram.IsKnownSchema(rh.FH.Schema) {
d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.FH.Schema, "timestamp", rh.T)
continue
}
if rh.FH.Schema > histogram.ExponentialSchemaMax && rh.FH.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// record is from a newer Prometheus version that supports higher
// resolution.
if err := rh.FH.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
return nil, fmt.Errorf("error reducing resolution of histogram #%d: %w", len(histograms)+1, err)
}
}
histograms = append(histograms, rh)
}
if dec.Err() != nil {
return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err())
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return histograms, nil
}
// DecodeFloatHistogram decodes a FloatHistogram from a byte slice.
func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
fh.CounterResetHint = histogram.CounterResetHint(buf.Byte())
fh.Schema = int32(buf.Varint64())
fh.ZeroThreshold = buf.Be64Float64()
fh.ZeroCount = buf.Be64Float64()
fh.Count = buf.Be64Float64()
fh.Sum = buf.Be64Float64()
l := buf.Uvarint()
if l > 0 {
fh.PositiveSpans = make([]histogram.Span, l)
}
for i := range fh.PositiveSpans {
fh.PositiveSpans[i].Offset = int32(buf.Varint64())
fh.PositiveSpans[i].Length = buf.Uvarint32()
}
l = buf.Uvarint()
if l > 0 {
fh.NegativeSpans = make([]histogram.Span, l)
}
for i := range fh.NegativeSpans {
fh.NegativeSpans[i].Offset = int32(buf.Varint64())
fh.NegativeSpans[i].Length = buf.Uvarint32()
}
l = buf.Uvarint()
if l > 0 {
fh.PositiveBuckets = make([]float64, l)
}
for i := range fh.PositiveBuckets {
fh.PositiveBuckets[i] = buf.Be64Float64()
}
l = buf.Uvarint()
if l > 0 {
fh.NegativeBuckets = make([]float64, l)
}
for i := range fh.NegativeBuckets {
fh.NegativeBuckets[i] = buf.Be64Float64()
}
if histogram.IsCustomBucketsSchema(fh.Schema) {
l = buf.Uvarint()
if l > 0 {
fh.CustomValues = make([]float64, l)
}
for i := range fh.CustomValues {
fh.CustomValues[i] = buf.Be64Float64()
}
}
}
// Encoder encodes series, sample, and tombstones records.
// The zero value is ready to use.
type Encoder struct{}
// Series appends the encoded series to b and returns the resulting slice.
func (*Encoder) Series(series []RefSeries, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Series))
for _, s := range series {
buf.PutBE64(uint64(s.Ref))
EncodeLabels(&buf, s.Labels)
}
return buf.Get()
}
// Metadata appends the encoded metadata to b and returns the resulting slice.
func (*Encoder) Metadata(metadata []RefMetadata, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Metadata))
for _, m := range metadata {
buf.PutUvarint64(uint64(m.Ref))
buf.PutByte(m.Type)
buf.PutUvarint(2) // num_fields: We currently have two more metadata fields, UNIT and HELP.
buf.PutUvarintStr(unitMetaName)
buf.PutUvarintStr(m.Unit)
buf.PutUvarintStr(helpMetaName)
buf.PutUvarintStr(m.Help)
}
return buf.Get()
}
// EncodeLabels encodes the contents of labels into buf.
func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) {
// TODO: reconsider if this function could be pushed down into labels.Labels to be more efficient.
buf.PutUvarint(lbls.Len())
lbls.Range(func(l labels.Label) {
buf.PutUvarintStr(l.Name)
buf.PutUvarintStr(l.Value)
})
}
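// exampleLabelsRoundTrip is an illustrative sketch, not upstream code: it
// shows that EncodeLabels and Decoder.DecodeLabels are symmetric over the
// same length-prefixed name/value layout.
func exampleLabelsRoundTrip(d *Decoder, lbls labels.Labels) labels.Labels {
buf := encoding.Encbuf{}
EncodeLabels(&buf, lbls)
dec := encoding.Decbuf{B: buf.Get()}
return d.DecodeLabels(&dec)
}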
// Samples appends the encoded samples to b and returns the resulting slice.
func (*Encoder) Samples(samples []RefSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Samples))
if len(samples) == 0 {
return buf.Get()
}
// Store base timestamp and base reference number of first sample.
// All samples encode their timestamp and ref as delta to those.
first := samples[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, s := range samples {
buf.PutVarint64(int64(s.Ref) - int64(first.Ref))
buf.PutVarint64(s.T - first.T)
buf.PutBE64(math.Float64bits(s.V))
}
return buf.Get()
}
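// exampleSamplesRoundTrip is an illustrative sketch, not upstream code: the
// first sample's ref and timestamp act as the base, every later sample is
// stored as varint deltas against that base, and Decoder.Samples reverses it.
func exampleSamplesRoundTrip() ([]RefSample, error) {
in := []RefSample{
{Ref: 100, T: 1000, V: 1.5},
{Ref: 101, T: 1001, V: 2.5},
}
var enc Encoder
rec := enc.Samples(in, nil)
var dec Decoder
return dec.Samples(rec, nil)
}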
// Tombstones appends the encoded tombstones to b and returns the resulting slice.
func (*Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Tombstones))
for _, s := range tstones {
for _, iv := range s.Intervals {
buf.PutBE64(uint64(s.Ref))
buf.PutVarint64(iv.Mint)
buf.PutVarint64(iv.Maxt)
}
}
return buf.Get()
}
func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Exemplars))
if len(exemplars) == 0 {
return buf.Get()
}
e.EncodeExemplarsIntoBuffer(exemplars, &buf)
return buf.Get()
}
func (*Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) {
// Store base timestamp and base reference number of first sample.
// All samples encode their timestamp and ref as delta to those.
first := exemplars[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, ex := range exemplars {
buf.PutVarint64(int64(ex.Ref) - int64(first.Ref))
buf.PutVarint64(ex.T - first.T)
buf.PutBE64(math.Float64bits(ex.V))
EncodeLabels(buf, ex.Labels)
}
}
func (*Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(MmapMarkers))
for _, s := range markers {
buf.PutBE64(uint64(s.Ref))
buf.PutBE64(uint64(s.MmapRef))
}
return buf.Get()
}
func (*Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(HistogramSamples))
if len(histograms) == 0 {
return buf.Get(), nil
}
var customBucketHistograms []RefHistogramSample
// Store base timestamp and base reference number of first histogram.
// All histograms encode their timestamp and ref as delta to those.
first := histograms[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, h := range histograms {
if h.H.UsesCustomBuckets() {
customBucketHistograms = append(customBucketHistograms, h)
continue
}
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
EncodeHistogram(&buf, h.H)
}
// Reset buffer if only custom bucket histograms existed in list of histogram samples.
if len(histograms) == len(customBucketHistograms) {
buf.Reset()
}
return buf.Get(), customBucketHistograms
}
func (*Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(CustomBucketsHistogramSamples))
if len(histograms) == 0 {
return buf.Get()
}
// Store base timestamp and base reference number of first histogram.
// All histograms encode their timestamp and ref as delta to those.
first := histograms[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, h := range histograms {
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
EncodeHistogram(&buf, h.H)
}
return buf.Get()
}
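// exampleEncodeAllHistograms is an illustrative sketch, not upstream code:
// HistogramSamples skips custom-bucket histograms and returns them, so a
// caller encodes a second record for the remainder via
// CustomBucketsHistogramSamples.
func exampleEncodeAllHistograms(e *Encoder, histograms []RefHistogramSample) (recs [][]byte) {
rec, leftover := e.HistogramSamples(histograms, nil)
if len(rec) > 0 {
recs = append(recs, rec)
}
if len(leftover) > 0 {
recs = append(recs, e.CustomBucketsHistogramSamples(leftover, nil))
}
return recs
}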
// EncodeHistogram encodes a Histogram into a byte slice.
func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) {
buf.PutByte(byte(h.CounterResetHint))
buf.PutVarint64(int64(h.Schema))
buf.PutBE64(math.Float64bits(h.ZeroThreshold))
buf.PutUvarint64(h.ZeroCount)
buf.PutUvarint64(h.Count)
buf.PutBE64(math.Float64bits(h.Sum))
buf.PutUvarint(len(h.PositiveSpans))
for _, s := range h.PositiveSpans {
buf.PutVarint64(int64(s.Offset))
buf.PutUvarint32(s.Length)
}
buf.PutUvarint(len(h.NegativeSpans))
for _, s := range h.NegativeSpans {
buf.PutVarint64(int64(s.Offset))
buf.PutUvarint32(s.Length)
}
buf.PutUvarint(len(h.PositiveBuckets))
for _, b := range h.PositiveBuckets {
buf.PutVarint64(b)
}
buf.PutUvarint(len(h.NegativeBuckets))
for _, b := range h.NegativeBuckets {
buf.PutVarint64(b)
}
if histogram.IsCustomBucketsSchema(h.Schema) {
buf.PutUvarint(len(h.CustomValues))
for _, v := range h.CustomValues {
buf.PutBEFloat64(v)
}
}
}
func (*Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(FloatHistogramSamples))
if len(histograms) == 0 {
return buf.Get(), nil
}
var customBucketsFloatHistograms []RefFloatHistogramSample
// Store base timestamp and base reference number of first histogram.
// All histograms encode their timestamp and ref as delta to those.
first := histograms[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, h := range histograms {
if h.FH.UsesCustomBuckets() {
customBucketsFloatHistograms = append(customBucketsFloatHistograms, h)
continue
}
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
EncodeFloatHistogram(&buf, h.FH)
}
// Reset buffer if only custom bucket histograms existed in list of histogram samples.
if len(histograms) == len(customBucketsFloatHistograms) {
buf.Reset()
}
return buf.Get(), customBucketsFloatHistograms
}
func (*Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(CustomBucketsFloatHistogramSamples))
if len(histograms) == 0 {
return buf.Get()
}
// Store base timestamp and base reference number of first histogram.
// All histograms encode their timestamp and ref as delta to those.
first := histograms[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, h := range histograms {
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
EncodeFloatHistogram(&buf, h.FH)
}
return buf.Get()
}
// EncodeFloatHistogram encodes the Float Histogram into a byte slice.
func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) {
buf.PutByte(byte(h.CounterResetHint))
buf.PutVarint64(int64(h.Schema))
buf.PutBEFloat64(h.ZeroThreshold)
buf.PutBEFloat64(h.ZeroCount)
buf.PutBEFloat64(h.Count)
buf.PutBEFloat64(h.Sum)
buf.PutUvarint(len(h.PositiveSpans))
for _, s := range h.PositiveSpans {
buf.PutVarint64(int64(s.Offset))
buf.PutUvarint32(s.Length)
}
buf.PutUvarint(len(h.NegativeSpans))
for _, s := range h.NegativeSpans {
buf.PutVarint64(int64(s.Offset))
buf.PutUvarint32(s.Length)
}
buf.PutUvarint(len(h.PositiveBuckets))
for _, b := range h.PositiveBuckets {
buf.PutBEFloat64(b)
}
buf.PutUvarint(len(h.NegativeBuckets))
for _, b := range h.NegativeBuckets {
buf.PutBEFloat64(b)
}
if histogram.IsCustomBucketsSchema(h.Schema) {
buf.PutUvarint(len(h.CustomValues))
for _, v := range h.CustomValues {
buf.PutBEFloat64(v)
}
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tombstones/tombstones.go | tsdb/tombstones/tombstones.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tombstones
import (
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"log/slog"
"math"
"os"
"path/filepath"
"sort"
"sync"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/encoding"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
const TombstonesFilename = "tombstones"
const (
// MagicTombstone is 4 bytes at the head of a tombstone file.
MagicTombstone = 0x0130BA30
tombstoneFormatV1 = 1
tombstoneFormatVersionSize = 1
tombstonesHeaderSize = 5
tombstonesCRCSize = 4
)
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// beforehand.
var castagnoliTable *crc32.Table
func init() {
castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}
// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
return crc32.New(castagnoliTable)
}
// Reader gives access to tombstone intervals by series reference.
type Reader interface {
// Get returns deletion intervals for the series with the given reference.
Get(ref storage.SeriesRef) (Intervals, error)
// Iter calls the given function for each encountered interval.
Iter(func(storage.SeriesRef, Intervals) error) error
// Total returns the total count of tombstones.
Total() uint64
// Close releases any underlying resources.
Close() error
}
func WriteFile(logger *slog.Logger, dir string, tr Reader) (int64, error) {
path := filepath.Join(dir, TombstonesFilename)
tmp := path + ".tmp"
hash := newCRC32()
var size int
f, err := os.Create(tmp)
if err != nil {
return 0, err
}
defer func() {
if f != nil {
if err := f.Close(); err != nil {
logger.Error("close tmp file", "err", err.Error())
}
}
if err := os.RemoveAll(tmp); err != nil {
logger.Error("remove tmp file", "err", err.Error())
}
}()
buf := encoding.Encbuf{B: make([]byte, 3*binary.MaxVarintLen64)}
buf.Reset()
// Write the meta.
buf.PutBE32(MagicTombstone)
n, err := f.Write(buf.Get())
if err != nil {
return 0, err
}
size += n
bytes, err := Encode(tr)
if err != nil {
return 0, fmt.Errorf("encoding tombstones: %w", err)
}
// Ignore first byte which is the format type. We do this for compatibility.
if _, err := hash.Write(bytes[tombstoneFormatVersionSize:]); err != nil {
return 0, fmt.Errorf("calculating hash for tombstones: %w", err)
}
n, err = f.Write(bytes)
if err != nil {
return 0, fmt.Errorf("writing tombstones: %w", err)
}
size += n
n, err = f.Write(hash.Sum(nil))
if err != nil {
return 0, err
}
size += n
if err := f.Sync(); err != nil {
return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
}
if err = f.Close(); err != nil {
return 0, err
}
f = nil
return int64(size), fileutil.Replace(tmp, path)
}
// Encode encodes the tombstones from the reader.
// It does not attach any magic number or checksum.
func Encode(tr Reader) ([]byte, error) {
buf := encoding.Encbuf{}
buf.PutByte(tombstoneFormatV1)
err := tr.Iter(func(ref storage.SeriesRef, ivs Intervals) error {
for _, iv := range ivs {
buf.PutUvarint64(uint64(ref))
buf.PutVarint64(iv.Mint)
buf.PutVarint64(iv.Maxt)
}
return nil
})
return buf.Get(), err
}
// Decode decodes the tombstones from the bytes
// which were encoded using the Encode method.
func Decode(b []byte) (Reader, error) {
d := &encoding.Decbuf{B: b}
if flag := d.Byte(); flag != tombstoneFormatV1 {
return nil, fmt.Errorf("invalid tombstone format %x", flag)
}
if d.Err() != nil {
return nil, d.Err()
}
stonesMap := NewMemTombstones()
for d.Len() > 0 {
k := storage.SeriesRef(d.Uvarint64())
mint := d.Varint64()
maxt := d.Varint64()
if d.Err() != nil {
return nil, d.Err()
}
stonesMap.AddInterval(k, Interval{mint, maxt})
}
return stonesMap, nil
}
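// exampleEncodeDecode is an illustrative sketch, not upstream code: Encode
// and Decode are symmetric, and the encoded bytes carry only the format byte
// plus (ref, mint, maxt) triples, with no magic number or checksum.
func exampleEncodeDecode(tr Reader) (Reader, error) {
b, err := Encode(tr)
if err != nil {
return nil, err
}
return Decode(b)
}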
// Stone holds the information on the posting and the time-ranges
// that are deleted.
type Stone struct {
Ref storage.SeriesRef
Intervals Intervals
}
func ReadTombstones(dir string) (Reader, int64, error) {
b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename))
switch {
case os.IsNotExist(err):
return NewMemTombstones(), 0, nil
case err != nil:
return nil, 0, err
}
if len(b) < tombstonesHeaderSize {
return nil, 0, fmt.Errorf("tombstones header: %w", encoding.ErrInvalidSize)
}
d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]}
if mg := d.Be32(); mg != MagicTombstone {
return nil, 0, fmt.Errorf("invalid magic number %x", mg)
}
// Verify checksum.
hash := newCRC32()
// Ignore first byte which is the format type.
if _, err := hash.Write(d.Get()[tombstoneFormatVersionSize:]); err != nil {
return nil, 0, fmt.Errorf("write to hash: %w", err)
}
if binary.BigEndian.Uint32(b[len(b)-tombstonesCRCSize:]) != hash.Sum32() {
return nil, 0, errors.New("checksum did not match")
}
if d.Err() != nil {
return nil, 0, d.Err()
}
stonesMap, err := Decode(d.Get())
if err != nil {
return nil, 0, err
}
return stonesMap, int64(len(b)), nil
}
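// exampleTombstonesRoundTrip is an illustrative sketch, not upstream code:
// WriteFile wraps the Encode payload with the magic number and CRC, and
// ReadTombstones verifies both before handing the payload to Decode.
func exampleTombstonesRoundTrip(logger *slog.Logger, dir string) (Reader, error) {
stones := NewMemTombstones()
stones.AddInterval(storage.SeriesRef(1), Interval{Mint: 0, Maxt: 100})
if _, err := WriteFile(logger, dir, stones); err != nil {
return nil, err
}
r, _, err := ReadTombstones(dir)
return r, err
}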
type MemTombstones struct {
intvlGroups map[storage.SeriesRef]Intervals
mtx sync.RWMutex
}
// NewMemTombstones creates a new in-memory tombstone Reader
// that allows adding new intervals.
func NewMemTombstones() *MemTombstones {
return &MemTombstones{intvlGroups: make(map[storage.SeriesRef]Intervals)}
}
func NewTestMemTombstones(intervals []Intervals) *MemTombstones {
ret := NewMemTombstones()
for i, intervalsGroup := range intervals {
for _, interval := range intervalsGroup {
ret.AddInterval(storage.SeriesRef(i+1), interval)
}
}
return ret
}
func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) {
t.mtx.RLock()
defer t.mtx.RUnlock()
intervals, ok := t.intvlGroups[ref]
if !ok {
return nil, nil
}
// Make a copy to avoid race.
res := make(Intervals, len(intervals))
copy(res, intervals)
return res, nil
}
func (t *MemTombstones) DeleteTombstones(refs map[storage.SeriesRef]struct{}) {
t.mtx.Lock()
defer t.mtx.Unlock()
for ref := range refs {
delete(t.intvlGroups, ref)
}
}
func (t *MemTombstones) TruncateBefore(beforeT int64) {
t.mtx.Lock()
defer t.mtx.Unlock()
for ref, ivs := range t.intvlGroups {
i := len(ivs) - 1
for ; i >= 0; i-- {
if beforeT > ivs[i].Maxt {
break
}
}
if len(ivs[i+1:]) == 0 {
delete(t.intvlGroups, ref)
} else {
newIvs := make(Intervals, len(ivs[i+1:]))
copy(newIvs, ivs[i+1:])
t.intvlGroups[ref] = newIvs
}
}
}
func (t *MemTombstones) Iter(f func(storage.SeriesRef, Intervals) error) error {
t.mtx.RLock()
defer t.mtx.RUnlock()
for ref, ivs := range t.intvlGroups {
if err := f(ref, ivs); err != nil {
return err
}
}
return nil
}
func (t *MemTombstones) Total() uint64 {
t.mtx.RLock()
defer t.mtx.RUnlock()
total := uint64(0)
for _, ivs := range t.intvlGroups {
total += uint64(len(ivs))
}
return total
}
// AddInterval to an existing memTombstones.
func (t *MemTombstones) AddInterval(ref storage.SeriesRef, itvs ...Interval) {
t.mtx.Lock()
defer t.mtx.Unlock()
for _, itv := range itvs {
t.intvlGroups[ref] = t.intvlGroups[ref].Add(itv)
}
}
func (*MemTombstones) Close() error {
return nil
}
// Interval represents a single time-interval.
type Interval struct {
Mint, Maxt int64
}
func (tr Interval) InBounds(t int64) bool {
return t >= tr.Mint && t <= tr.Maxt
}
func (tr Interval) IsSubrange(dranges Intervals) bool {
for _, r := range dranges {
if r.InBounds(tr.Mint) && r.InBounds(tr.Maxt) {
return true
}
}
return false
}
// Intervals represents a set of increasing and non-overlapping time-intervals.
type Intervals []Interval
// Add the new time-range to the existing ones.
// The existing ones must be sorted.
func (in Intervals) Add(n Interval) Intervals {
if len(in) == 0 {
return append(in, n)
}
// Find min and max indexes of intervals that overlap with the new interval.
// Intervals are closed [t1, t2] and t is discrete, so if neighbouring intervals
// are 1 step away from the new one, we can merge them together.
mini := 0
if n.Mint != math.MinInt64 { // Avoid overflow.
mini = sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 })
if mini == len(in) {
return append(in, n)
}
}
maxi := len(in)
if n.Maxt != math.MaxInt64 { // Avoid overflow.
maxi = sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 })
if maxi == 0 {
if mini == 0 {
return append(Intervals{n}, in...)
}
return append(in[:mini], append(Intervals{n}, in[mini:]...)...)
}
}
if n.Mint < in[mini].Mint {
in[mini].Mint = n.Mint
}
in[mini].Maxt = max(n.Maxt, in[maxi+mini-1].Maxt)
return append(in[:mini+1], in[maxi+mini:]...)
}
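// exampleAddMerge is an illustrative sketch, not upstream code: because
// intervals are closed and timestamps are discrete, adding {3, 5} to
// {{1, 2}, {7, 8}} bridges the one-step gap and yields {{1, 5}, {7, 8}}.
func exampleAddMerge() Intervals {
in := Intervals{{1, 2}, {7, 8}}
return in.Add(Interval{Mint: 3, Maxt: 5})
}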
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/tombstones/tombstones_test.go | tsdb/tombstones/tombstones_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tombstones
import (
"math"
"math/rand"
"sync"
"testing"
"time"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/storage"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestWriteAndReadbackTombstones(t *testing.T) {
tmpdir := t.TempDir()
ref := uint64(0)
stones := NewMemTombstones()
// Generate the tombstones.
for range 100 {
ref += uint64(rand.Int31n(10)) + 1
numRanges := rand.Intn(5) + 1
dranges := make(Intervals, 0, numRanges)
mint := rand.Int63n(time.Now().UnixNano())
for range numRanges {
dranges = dranges.Add(Interval{mint, mint + rand.Int63n(1000)})
mint += rand.Int63n(1000) + 1
}
stones.AddInterval(storage.SeriesRef(ref), dranges...)
}
_, err := WriteFile(promslog.NewNopLogger(), tmpdir, stones)
require.NoError(t, err)
restr, _, err := ReadTombstones(tmpdir)
require.NoError(t, err)
// Compare the two readers.
require.Equal(t, stones, restr)
}
func TestDeletingTombstones(t *testing.T) {
stones := NewMemTombstones()
ref := storage.SeriesRef(42)
mint := rand.Int63n(time.Now().UnixNano())
dranges := make(Intervals, 0, 1)
dranges = dranges.Add(Interval{mint, mint + rand.Int63n(1000)})
stones.AddInterval(ref, dranges...)
stones.AddInterval(storage.SeriesRef(43), dranges...)
intervals, err := stones.Get(ref)
require.NoError(t, err)
require.Equal(t, intervals, dranges)
stones.DeleteTombstones(map[storage.SeriesRef]struct{}{ref: {}})
intervals, err = stones.Get(ref)
require.NoError(t, err)
require.Empty(t, intervals)
}
func TestTombstonesGetWithCopy(t *testing.T) {
stones := NewMemTombstones()
stones.AddInterval(1, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}...)
intervals0, err := stones.Get(1)
require.NoError(t, err)
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0)
intervals1 := intervals0.Add(Interval{Mint: 4, Maxt: 6})
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0) // Original slice changed.
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals1)
intervals2, err := stones.Get(1)
require.NoError(t, err)
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals2)
}
func TestTruncateBefore(t *testing.T) {
cases := []struct {
before Intervals
beforeT int64
after Intervals
}{
{
before: Intervals{{1, 2}, {4, 10}, {12, 100}},
beforeT: 3,
after: Intervals{{4, 10}, {12, 100}},
},
{
before: Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
beforeT: 900,
after: Intervals{{200, 1000}},
},
{
before: Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
beforeT: 2000,
after: nil,
},
{
before: Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
beforeT: 0,
after: Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
},
}
for _, c := range cases {
ref := storage.SeriesRef(42)
stones := NewMemTombstones()
stones.AddInterval(ref, c.before...)
stones.TruncateBefore(c.beforeT)
ts, err := stones.Get(ref)
require.NoError(t, err)
require.Equal(t, c.after, ts)
}
}
func TestAddingNewIntervals(t *testing.T) {
cases := []struct {
exist Intervals
new Interval
exp Intervals
}{
{
new: Interval{1, 2},
exp: Intervals{{1, 2}},
},
{
exist: Intervals{{1, 2}},
new: Interval{1, 2},
exp: Intervals{{1, 2}},
},
{
exist: Intervals{{1, 4}, {6, 6}},
new: Interval{5, 6},
exp: Intervals{{1, 6}},
},
{
exist: Intervals{{1, 10}, {12, 20}, {25, 30}},
new: Interval{21, 25},
exp: Intervals{{1, 10}, {12, 30}},
},
{
exist: Intervals{{1, 10}, {12, 20}, {25, 30}},
new: Interval{22, 23},
exp: Intervals{{1, 10}, {12, 20}, {22, 23}, {25, 30}},
},
{
exist: Intervals{{1, 2}, {3, 5}, {7, 7}},
new: Interval{6, 7},
exp: Intervals{{1, 2}, {3, 7}},
},
{
exist: Intervals{{1, 10}, {12, 20}, {25, 30}},
new: Interval{18, 23},
exp: Intervals{{1, 10}, {12, 23}, {25, 30}},
},
{
exist: Intervals{{1, 10}, {12, 20}, {25, 30}},
new: Interval{9, 23},
exp: Intervals{{1, 23}, {25, 30}},
},
{
exist: Intervals{{1, 10}, {12, 20}, {25, 30}},
new: Interval{9, 230},
exp: Intervals{{1, 230}},
},
{
exist: Intervals{{5, 10}, {12, 20}, {25, 30}},
new: Interval{1, 4},
exp: Intervals{{1, 10}, {12, 20}, {25, 30}},
},
{
exist: Intervals{{5, 10}, {12, 20}, {25, 30}},
new: Interval{11, 14},
exp: Intervals{{5, 20}, {25, 30}},
},
{
exist: Intervals{{5, 10}, {12, 20}, {25, 30}},
new: Interval{1, 3},
exp: Intervals{{1, 3}, {5, 10}, {12, 20}, {25, 30}},
},
{
exist: Intervals{{5, 10}, {12, 20}, {25, 30}},
new: Interval{35, 40},
exp: Intervals{{5, 10}, {12, 20}, {25, 30}, {35, 40}},
},
{
new: Interval{math.MinInt64, 2},
exp: Intervals{{math.MinInt64, 2}},
},
{
exist: Intervals{{math.MinInt64, 2}},
new: Interval{9, math.MaxInt64},
exp: Intervals{{math.MinInt64, 2}, {9, math.MaxInt64}},
},
{
exist: Intervals{{9, math.MaxInt64}},
new: Interval{math.MinInt64, 2},
exp: Intervals{{math.MinInt64, 2}, {9, math.MaxInt64}},
},
{
exist: Intervals{{9, math.MaxInt64}},
new: Interval{math.MinInt64, 10},
exp: Intervals{{math.MinInt64, math.MaxInt64}},
},
{
exist: Intervals{{9, 10}},
new: Interval{math.MinInt64, 7},
exp: Intervals{{math.MinInt64, 7}, {9, 10}},
},
{
exist: Intervals{{9, 10}},
new: Interval{12, math.MaxInt64},
exp: Intervals{{9, 10}, {12, math.MaxInt64}},
},
{
exist: Intervals{{9, 10}},
new: Interval{math.MinInt64, 8},
exp: Intervals{{math.MinInt64, 10}},
},
{
exist: Intervals{{9, 10}},
new: Interval{11, math.MaxInt64},
exp: Intervals{{9, math.MaxInt64}},
},
}
for _, c := range cases {
t.Run("", func(t *testing.T) {
require.Equal(t, c.exp, c.exist.Add(c.new))
})
}
}
// TestMemTombstonesConcurrency makes sure MemTombstones are safe to access from different goroutines.
func TestMemTombstonesConcurrency(t *testing.T) {
tomb := NewMemTombstones()
totalRuns := 100
var wg sync.WaitGroup
wg.Add(2)
go func() {
for x := range totalRuns {
tomb.AddInterval(storage.SeriesRef(x), Interval{int64(x), int64(x)})
}
wg.Done()
}()
go func() {
for x := range totalRuns {
_, err := tomb.Get(storage.SeriesRef(x))
require.NoError(t, err)
}
wg.Done()
}()
wg.Wait()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/direct_io_unsupported.go | tsdb/fileutil/direct_io_unsupported.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !linux
package fileutil
import (
"bufio"
"os"
)
func NewBufioWriterWithSize(f *os.File, size int) (BufWriter, error) {
return &writer{bufio.NewWriterSize(f, size)}, nil
}
func NewDirectIOWriter(*os.File, int) (BufWriter, error) {
return nil, errDirectIOUnsupported
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock_test.go | tsdb/fileutil/flock_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/util/testutil"
)
func TestLocking(t *testing.T) {
dir := testutil.NewTemporaryDirectory("test_flock", t)
defer dir.Close()
fileName := filepath.Join(dir.Path(), "LOCK")
_, err := os.Stat(fileName)
require.Error(t, err, "File %q unexpectedly exists.", fileName)
lock, existed, err := Flock(fileName)
require.NoError(t, err, "Error locking file %q", fileName)
require.False(t, existed, "File %q reported as existing during locking.", fileName)
// File must now exist.
_, err = os.Stat(fileName)
require.NoError(t, err, "Could not stat file %q expected to exist", fileName)
// Try to lock again.
lockedAgain, existed, err := Flock(fileName)
require.Error(t, err, "File %q locked twice.", fileName)
require.Nil(t, lockedAgain, "Unsuccessful locking did not return nil.")
require.True(t, existed, "Existing file %q not recognized.", fileName)
err = lock.Release()
require.NoError(t, err, "Error releasing lock for file %q", fileName)
// File must still exist.
_, err = os.Stat(fileName)
require.NoError(t, err, "Could not stat file %q expected to exist", fileName)
// Lock existing file.
lock, existed, err = Flock(fileName)
require.NoError(t, err, "Error locking file %q", fileName)
require.True(t, existed, "Existing file %q not recognized.", fileName)
err = lock.Release()
require.NoError(t, err, "Error releasing lock for file %q", fileName)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/direct_io_writer.go | tsdb/fileutil/direct_io_writer.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
package fileutil
import (
"errors"
"fmt"
"io"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
const (
// The defaults are deliberately set high to cover most setups.
// On Linux >= 6.1, statx(2) https://man7.org/linux/man-pages/man2/statx.2.html will be later
// used to fetch the exact alignment restrictions.
defaultAlignment = 4096
defaultBufSize = 4096
)
var (
errWriterInvalid = errors.New("the last flush resulted in an unaligned offset, the writer can no longer ensure contiguous writes")
errStatxNotSupported = errors.New("the statx syscall with STATX_DIOALIGN is not supported. At least Linux kernel 6.1 is needed")
)
// directIOWriter is a bufio.Writer-like buffered writer that supports Direct IO to a file
// by ensuring all alignment restrictions are satisfied.
// The writer can handle files whose initial offsets are not aligned.
// Once Direct IO is in use, if an explicit call to Flush() results in an unaligned offset, the writer
// should no longer be used, as it can no longer support contiguous writes.
type directIOWriter struct {
buf []byte
n int
f *os.File
// offsetAlignmentGap represents the number of bytes needed to reach the nearest
// offset alignment on the file, making Direct IO possible.
offsetAlignmentGap int
alignmentRqmts *directIORqmts
err error
invalid bool
}
func newDirectIOWriter(f *os.File, size int) (*directIOWriter, error) {
alignmentRqmts, err := fileDirectIORqmts(f)
if err != nil {
return nil, err
}
if size <= 0 {
size = defaultBufSize
}
if size%alignmentRqmts.offsetAlign != 0 {
return nil, fmt.Errorf("size %d should be a multiple of %d", size, alignmentRqmts.offsetAlign)
}
gap, err := checkInitialUnalignedOffset(f, alignmentRqmts)
if err != nil {
return nil, err
}
return &directIOWriter{
buf: alignedBlock(size, alignmentRqmts),
f: f,
offsetAlignmentGap: gap,
alignmentRqmts: alignmentRqmts,
}, nil
}
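// exampleDirectIOUsage is an illustrative sketch, not upstream code: create
// the writer, stream data through Write, then Flush once at the end. A Flush
// that leaves the file offset unaligned invalidates the writer.
func exampleDirectIOUsage(f *os.File, data []byte) error {
w, err := newDirectIOWriter(f, 0) // A non-positive size falls back to defaultBufSize.
if err != nil {
return err
}
if _, err := w.Write(data); err != nil {
return err
}
return w.Flush()
}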
func (b *directIOWriter) Available() int { return len(b.buf) - b.n }
func (b *directIOWriter) Buffered() int { return b.n }
// fillInitialOffsetGap writes the necessary bytes from the buffer without Direct IO
// to fill offsetAlignmentGap and align the file offset, enabling Direct IO usage.
// Once alignment is achieved, Direct IO is enabled.
func (b *directIOWriter) fillInitialOffsetGap() {
if b.n == 0 || b.offsetAlignmentGap == 0 {
return
}
bytesToAlign := min(b.n, b.offsetAlignmentGap)
n, err := b.f.Write(b.buf[:bytesToAlign])
if n < bytesToAlign && err == nil {
err = io.ErrShortWrite
}
if n > 0 {
copy(b.buf[0:b.n-n], b.buf[n:b.n])
b.n -= n
}
// If the file offset was aligned, enable Direct IO.
b.offsetAlignmentGap -= n
if b.offsetAlignmentGap == 0 {
err = errors.Join(err, enableDirectIO(b.f.Fd()))
}
b.err = errors.Join(b.err, err)
}
func (b *directIOWriter) directIOWrite(p []byte, padding int) (int, error) {
relevant := len(p) - padding
n, err := b.f.Write(p)
switch {
case n < relevant:
relevant = n
if err == nil {
err = io.ErrShortWrite
}
case n > relevant:
// Adjust the offset to discard the padding that was written.
writtenPadding := int64(n - relevant)
_, err := b.f.Seek(-writtenPadding, io.SeekCurrent)
if err != nil {
b.err = errors.Join(b.err, fmt.Errorf("seek to discard written padding %d: %w", writtenPadding, err))
}
}
if relevant%b.alignmentRqmts.offsetAlign != 0 {
b.invalid = true
}
return relevant, err
}
// canDirectIOWrite returns true when all Direct IO alignment restrictions
// are met for the p block to be written into the file.
func (b *directIOWriter) canDirectIOWrite(p []byte) bool {
return isAligned(p, b.alignmentRqmts) && b.offsetAlignmentGap == 0
}
func (b *directIOWriter) Write(p []byte) (nn int, err error) {
if b.invalid {
return 0, errWriterInvalid
}
for len(p) > b.Available() && b.err == nil {
var n1, n2 int
if b.Buffered() == 0 && b.canDirectIOWrite(p) {
// Large write, empty buffer.
// To avoid copy, write from p via Direct IO as the block and the file
// offset are aligned.
n1, b.err = b.directIOWrite(p, 0)
} else {
n1 = copy(b.buf[b.n:], p)
b.n += n1
if b.offsetAlignmentGap != 0 {
b.fillInitialOffsetGap()
// Refill the buffer.
n2 = copy(b.buf[b.n:], p[n1:])
b.n += n2
}
if b.Available() == 0 {
// Avoid flushing in case the second refill wasn't complete.
b.err = errors.Join(b.err, b.flush())
}
}
nn += n1 + n2
p = p[n1+n2:]
}
if b.err != nil {
return nn, b.err
}
n := copy(b.buf[b.n:], p)
b.n += n
nn += n
return nn, nil
}
func (b *directIOWriter) flush() error {
if b.invalid {
return errWriterInvalid
}
if b.err != nil {
return b.err
}
if b.n == 0 {
return nil
}
// Ensure the segment length alignment restriction is met.
// If the buffer length isn't a multiple of offsetAlign, round
// it to the nearest upper multiple and add zero padding.
uOffset := b.n
if uOffset%b.alignmentRqmts.offsetAlign != 0 {
uOffset = ((uOffset / b.alignmentRqmts.offsetAlign) + 1) * b.alignmentRqmts.offsetAlign
for i := b.n; i < uOffset; i++ {
b.buf[i] = 0
}
}
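// Illustrative arithmetic, assuming offsetAlign=4096: b.n=5000 rounds uOffset
// up to 8192 and zeroes bytes 5000..8191; directIOWrite is then told about
// the 3192 padding bytes so they are discarded from the file offset and not
// reported as payload.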
n, err := b.directIOWrite(b.buf[:uOffset], uOffset-b.n)
if err != nil {
if n > 0 && n < b.n {
copy(b.buf[0:b.n-n], b.buf[n:b.n])
}
b.n -= n
b.err = errors.Join(b.err, err)
return err
}
b.n = 0
return nil
}
func (b *directIOWriter) Flush() error {
if b.offsetAlignmentGap != 0 {
b.fillInitialOffsetGap()
if b.err != nil {
return b.err
}
}
return b.flush()
}
func (b *directIOWriter) Reset(f *os.File) error {
alignmentRqmts, err := fileDirectIORqmts(f)
if err != nil {
return err
}
b.alignmentRqmts = alignmentRqmts
if b.buf == nil {
b.buf = alignedBlock(defaultBufSize, b.alignmentRqmts)
}
gap, err := checkInitialUnalignedOffset(f, b.alignmentRqmts)
if err != nil {
return err
}
b.offsetAlignmentGap = gap
b.err = nil
b.invalid = false
b.n = 0
b.f = f
return nil
}
// fileDirectIORqmts fetches alignment requirements via Statx, falling back to default
// values when unsupported.
func fileDirectIORqmts(f *os.File) (*directIORqmts, error) {
alignmentRqmts, err := fetchDirectIORqmtsFromStatx(f.Fd())
switch {
case errors.Is(err, errStatxNotSupported):
alignmentRqmts = defaultDirectIORqmts()
case err != nil:
return nil, err
}
if alignmentRqmts.memoryAlign == 0 || alignmentRqmts.offsetAlign == 0 {
// This may require some extra testing.
return nil, fmt.Errorf("zero alignment requirement is not supported %+v", alignmentRqmts)
}
return alignmentRqmts, nil
}
func alignmentOffset(block []byte, requiredAlignment int) int {
return computeAlignmentOffset(block, requiredAlignment)
}
func computeAlignmentOffset(block []byte, alignment int) int {
if alignment == 0 {
return 0
}
if len(block) == 0 {
panic("empty block not supported")
}
return int(uintptr(unsafe.Pointer(&block[0])) & uintptr(alignment-1))
}
// isAligned checks if the length of the block is a multiple of offsetAlign
// and if its address is aligned with memoryAlign.
func isAligned(block []byte, alignmentRqmts *directIORqmts) bool {
return alignmentOffset(block, alignmentRqmts.memoryAlign) == 0 && len(block)%alignmentRqmts.offsetAlign == 0
}
// alignedBlock returns a block whose address is aligned to memoryAlign.
// The size must be a multiple of offsetAlign.
func alignedBlock(size int, alignmentRqmts *directIORqmts) []byte {
if size == 0 || size%alignmentRqmts.offsetAlign != 0 {
panic(fmt.Errorf("size %d should be > 0 and a multiple of offsetAlign=%d", size, alignmentRqmts.offsetAlign))
}
if alignmentRqmts.memoryAlign == 0 {
return make([]byte, size)
}
block := make([]byte, size+alignmentRqmts.memoryAlign)
a := alignmentOffset(block, alignmentRqmts.memoryAlign)
if a == 0 {
return block[:size]
}
offset := alignmentRqmts.memoryAlign - a
block = block[offset : offset+size]
if !isAligned(block, alignmentRqmts) {
// Assuming this to be rare, if not impossible.
panic("cannot create an aligned block")
}
return block
}
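// exampleAlignedWriteBuffer is an illustrative sketch, not upstream code:
// allocate a buffer that satisfies both the memory-address and length
// alignment requirements, then double-check it before use.
func exampleAlignedWriteBuffer(rqmts *directIORqmts) []byte {
block := alignedBlock(2*rqmts.offsetAlign, rqmts)
if !isAligned(block, rqmts) {
panic("unexpected: alignedBlock must return an aligned buffer")
}
return block
}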
func currentFileOffset(f *os.File) (int, error) {
curOff, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return 0, fmt.Errorf("cannot get the current offset: %w", err)
}
return int(curOff), nil
}
func fileStatusFlags(fd uintptr) (int, error) {
flag, err := unix.FcntlInt(fd, unix.F_GETFL, 0)
if err != nil {
return 0, fmt.Errorf("cannot get file status flags: %w", err)
}
return flag, err
}
// enableDirectIO enables Direct IO on the file if needed.
func enableDirectIO(fd uintptr) error {
flag, err := fileStatusFlags(fd)
if err != nil {
return err
}
if (flag & unix.O_DIRECT) == unix.O_DIRECT {
return nil
}
_, err = unix.FcntlInt(fd, unix.F_SETFL, flag|unix.O_DIRECT)
if err != nil {
return fmt.Errorf("cannot enable Direct IO: %w", err)
}
return nil
}
// checkInitialUnalignedOffset returns the gap between the current offset of the file
// and the nearest aligned offset.
// If the current offset is aligned, Direct IO is enabled on the file.
func checkInitialUnalignedOffset(f *os.File, alignmentRqmts *directIORqmts) (int, error) {
offset, err := currentFileOffset(f)
if err != nil {
return 0, err
}
alignment := alignmentRqmts.offsetAlign
gap := (alignment - offset%alignment) % alignment
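// Illustrative arithmetic, assuming alignment=4096: offset 0 or 4096 gives
// gap 0 (already aligned), while offset 100 gives gap 3996.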
if gap == 0 {
if err := enableDirectIO(f.Fd()); err != nil {
return 0, err
}
}
return gap, nil
}
// directIORqmts holds the alignment requirements for direct I/O.
// All fields are in bytes.
type directIORqmts struct {
// The required alignment for memory buffers addresses.
memoryAlign int
// The required alignment for I/O segment lengths and file offsets.
offsetAlign int
}
func defaultDirectIORqmts() *directIORqmts {
return &directIORqmts{
memoryAlign: defaultAlignment,
offsetAlign: defaultAlignment,
}
}
// fetchDirectIORqmtsFromStatx tries to retrieve direct I/O alignment requirements for the
// file descriptor using statx.
func fetchDirectIORqmtsFromStatx(fd uintptr) (*directIORqmts, error) {
var stat unix.Statx_t
flags := unix.AT_SYMLINK_NOFOLLOW | unix.AT_EMPTY_PATH | unix.AT_STATX_DONT_SYNC
mask := unix.STATX_DIOALIGN
if err := unix.Statx(int(fd), "", flags, unix.STATX_DIOALIGN, &stat); err != nil {
if err == unix.ENOSYS {
return nil, errStatxNotSupported
}
return nil, fmt.Errorf("statx failed on fd %d: %w", fd, err)
}
if stat.Mask&uint32(mask) == 0 {
return nil, errStatxNotSupported
}
if stat.Dio_mem_align == 0 || stat.Dio_offset_align == 0 {
return nil, fmt.Errorf("%w: kernel may be old or the file may be on an unsupported FS", errDirectIOUnsupported)
}
return &directIORqmts{
memoryAlign: int(stat.Dio_mem_align),
offsetAlign: int(stat.Dio_offset_align),
}, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap_arm64.go | tsdb/fileutil/mmap_arm64.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package fileutil
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/direct_io_writer_test.go | tsdb/fileutil/direct_io_writer_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
package fileutil
import (
"io"
"os"
"path"
"testing"
"github.com/stretchr/testify/require"
)
func directIORqmtsForTest(tb testing.TB) *directIORqmts {
f, err := os.OpenFile(path.Join(tb.TempDir(), "foo"), os.O_CREATE|os.O_WRONLY, 0o666)
require.NoError(tb, err)
alignmentRqmts, err := fileDirectIORqmts(f)
require.NoError(tb, err)
return alignmentRqmts
}
func TestDirectIOFile(t *testing.T) {
tmpDir := t.TempDir()
f, err := os.OpenFile(path.Join(tmpDir, "test"), os.O_CREATE|os.O_WRONLY, 0o666)
require.NoError(t, err)
require.NoError(t, enableDirectIO(f.Fd()))
}
func TestAlignedBlockEarlyPanic(t *testing.T) {
alignRqmts := directIORqmtsForTest(t)
cases := []struct {
desc string
size int
}{
{"Zero size", 0},
{"Size not multiple of offset alignment", 9973},
}
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
require.Panics(t, func() {
alignedBlock(tc.size, alignRqmts)
})
})
}
}
func TestAlignedBlock(t *testing.T) {
alignRqmts := directIORqmtsForTest(t)
block := alignedBlock(5*alignRqmts.offsetAlign, alignRqmts)
require.True(t, isAligned(block, alignRqmts))
require.Len(t, block, 5*alignRqmts.offsetAlign)
require.False(t, isAligned(block[1:], alignRqmts))
}
func TestDirectIOWriter(t *testing.T) {
alignRqmts := directIORqmtsForTest(t)
cases := []struct {
name string
initialOffset int
bufferSize int
dataSize int
// writtenBytes should also consider needed zero padding.
writtenBytes int
shouldInvalidate bool
}{
{
name: "data equal to buffer",
bufferSize: 8 * alignRqmts.offsetAlign,
dataSize: 8 * alignRqmts.offsetAlign,
writtenBytes: 8 * alignRqmts.offsetAlign,
},
{
name: "data exceeds buffer",
bufferSize: 4 * alignRqmts.offsetAlign,
dataSize: 64 * alignRqmts.offsetAlign,
writtenBytes: 64 * alignRqmts.offsetAlign,
},
{
name: "data exceeds buffer + final offset unaligned",
bufferSize: 2 * alignRqmts.offsetAlign,
dataSize: 4*alignRqmts.offsetAlign + 33,
writtenBytes: 4*alignRqmts.offsetAlign + alignRqmts.offsetAlign,
shouldInvalidate: true,
},
{
name: "data smaller than buffer",
bufferSize: 8 * alignRqmts.offsetAlign,
dataSize: 3 * alignRqmts.offsetAlign,
writtenBytes: 3 * alignRqmts.offsetAlign,
},
{
name: "data smaller than buffer + final offset unaligned",
bufferSize: 4 * alignRqmts.offsetAlign,
dataSize: alignRqmts.offsetAlign + 70,
writtenBytes: alignRqmts.offsetAlign + alignRqmts.offsetAlign,
shouldInvalidate: true,
},
{
name: "offset aligned",
initialOffset: alignRqmts.offsetAlign,
bufferSize: 8 * alignRqmts.offsetAlign,
dataSize: alignRqmts.offsetAlign,
writtenBytes: alignRqmts.offsetAlign,
},
{
name: "initial offset unaligned + final offset unaligned",
initialOffset: 8,
bufferSize: 8 * alignRqmts.offsetAlign,
dataSize: 64 * alignRqmts.offsetAlign,
writtenBytes: 64*alignRqmts.offsetAlign + (alignRqmts.offsetAlign - 8),
shouldInvalidate: true,
},
{
name: "offset unaligned + final offset aligned",
initialOffset: 8,
bufferSize: 4 * alignRqmts.offsetAlign,
dataSize: 4*alignRqmts.offsetAlign + (alignRqmts.offsetAlign - 8),
writtenBytes: 4*alignRqmts.offsetAlign + (alignRqmts.offsetAlign - 8),
},
{
name: "empty data",
bufferSize: 4 * alignRqmts.offsetAlign,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
fileName := path.Join(t.TempDir(), "test")
data := make([]byte, tc.dataSize)
for i := range data {
// Use 251 (a prime) rather than 256, which may evenly divide the required alignment and produce repeating patterns.
data[i] = byte(i % 251)
}
f, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY, 0o666)
require.NoError(t, err)
if tc.initialOffset != 0 {
_, err = f.Seek(int64(tc.initialOffset), io.SeekStart)
require.NoError(t, err)
}
w, err := newDirectIOWriter(f, tc.bufferSize)
require.NoError(t, err)
n, err := w.Write(data)
require.NoError(t, err)
require.Equal(t, tc.dataSize, n)
require.NoError(t, w.Flush())
// Check the file's final offset.
currOffset, err := currentFileOffset(f)
require.NoError(t, err)
require.Equal(t, tc.dataSize+tc.initialOffset, currOffset)
// Check the written data.
fileBytes, err := os.ReadFile(fileName)
require.NoError(t, err)
if tc.dataSize > 0 {
require.Len(t, fileBytes, tc.writtenBytes+tc.initialOffset)
require.Equal(t, data, fileBytes[tc.initialOffset:tc.dataSize+tc.initialOffset])
} else {
require.Empty(t, fileBytes)
}
// Check the writer state.
if tc.shouldInvalidate {
require.True(t, w.invalid)
require.Error(t, w.Flush())
_, err = w.Write([]byte{})
require.Error(t, err)
} else {
require.False(t, w.invalid)
require.NoError(t, w.Flush())
_, err = w.Write([]byte{})
require.NoError(t, err)
}
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/sync_linux.go | tsdb/fileutil/sync_linux.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
package fileutil
import (
"os"
"syscall"
)
// Fdatasync is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled.
func Fdatasync(f *os.File) error {
return syscall.Fdatasync(int(f.Fd()))
}
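// A minimal usage sketch (the file name and payload below are illustrative,
// not part of this package): append to a segment file and flush the data with
// Fdatasync, avoiding the metadata flush a full fsync would force.
//
//	f, err := os.OpenFile("segment.wal", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o666)
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if _, err := f.Write(payload); err != nil {
//		return err
//	}
//	return Fdatasync(f)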
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/sync.go | tsdb/fileutil/sync.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !linux && !darwin
package fileutil
import "os"
// Fdatasync is a wrapper around f.Sync(). The Linux and Darwin platforms need special handling and provide their own implementations.
func Fdatasync(f *os.File) error {
return f.Sync()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock_js.go | tsdb/fileutil/flock_js.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build js
package fileutil
import "errors"
type unixLock struct{}
func (l *unixLock) Release() error {
return errors.New("unsupported")
}
func (l *unixLock) set(lock bool) error {
return errors.New("unsupported")
}
func newLock(fileName string) (Releaser, error) {
return nil, errors.New("unsupported")
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock_unix.go | tsdb/fileutil/flock_unix.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
package fileutil
import (
"os"
"syscall"
)
type unixLock struct {
f *os.File
}
func (l *unixLock) Release() error {
if err := l.set(false); err != nil {
return err
}
return l.f.Close()
}
func (l *unixLock) set(lock bool) error {
how := syscall.LOCK_UN
if lock {
how = syscall.LOCK_EX
}
return syscall.Flock(int(l.f.Fd()), how|syscall.LOCK_NB)
}
func newLock(fileName string) (Releaser, error) {
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
if err != nil {
return nil, err
}
l := &unixLock{f}
err = l.set(true)
if err != nil {
f.Close()
return nil, err
}
return l, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/preallocate.go | tsdb/fileutil/preallocate.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"io"
"os"
)
// Preallocate tries to allocate space for the given
// file. This operation is only supported on Linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error is returned.
// Otherwise, the error encountered is returned.
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
if sizeInBytes == 0 {
// fallocate will return EINVAL if length is 0; skip
return nil
}
if extendFile {
return preallocExtend(f, sizeInBytes)
}
return preallocFixed(f, sizeInBytes)
}
func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
curOff, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
size, err := f.Seek(sizeInBytes, io.SeekEnd)
if err != nil {
return err
}
if _, err = f.Seek(curOff, io.SeekStart); err != nil {
return err
}
if sizeInBytes > size {
return nil
}
return f.Truncate(sizeInBytes)
}
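// A minimal usage sketch (the size and file name are illustrative): reserve
// 128 MiB up front and extend the file, so subsequent writes do not pay for
// incremental allocation.
//
//	f, err := os.Create("segment-000001")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := Preallocate(f, 128*1024*1024, true); err != nil {
//		return err
//	}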
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap.go | tsdb/fileutil/mmap.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"fmt"
"os"
)
type MmapFile struct {
f *os.File
b []byte
}
func OpenMmapFile(path string) (*MmapFile, error) {
return OpenMmapFileWithSize(path, 0)
}
func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) {
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("try lock file: %w", err)
}
defer func() {
if retErr != nil {
f.Close()
}
}()
if size <= 0 {
info, err := f.Stat()
if err != nil {
return nil, fmt.Errorf("stat: %w", err)
}
size = int(info.Size())
}
b, err := mmap(f, size)
if err != nil {
return nil, fmt.Errorf("mmap, size %d: %w", size, err)
}
return &MmapFile{f: f, b: b}, nil
}
func (f *MmapFile) Close() error {
err0 := munmap(f.b)
err1 := f.f.Close()
if err0 != nil {
return err0
}
return err1
}
func (f *MmapFile) File() *os.File {
return f.f
}
func (f *MmapFile) Bytes() []byte {
return f.b
}
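// A minimal usage sketch (the path is illustrative): map a chunk file
// read-only and release the mapping with Close. The slice returned by Bytes
// must not be used after Close.
//
//	mf, err := OpenMmapFile("chunks-000001")
//	if err != nil {
//		return err
//	}
//	defer mf.Close()
//	b := mf.Bytes() // the whole file, backed by the mapping
//	_ = b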
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock.go | tsdb/fileutil/flock.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path/filepath"
)
// Releaser provides the Release method to release a file lock.
type Releaser interface {
Release() error
}
// Flock locks the file with the provided name. If the file does not exist, it is
// created. The returned Releaser is used to release the lock. existed is true
// if the file to lock already existed. A non-nil error is returned if the
// locking has failed. Neither this function nor the returned Releaser is
// goroutine-safe.
func Flock(fileName string) (r Releaser, existed bool, err error) {
if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil {
return nil, false, err
}
_, err = os.Stat(fileName)
existed = err == nil
r, err = newLock(fileName)
return r, existed, err
}
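// A minimal usage sketch (dataDir and the lock file name are illustrative;
// the caller needs path/filepath): take an exclusive lock on a data
// directory and release it on shutdown.
//
//	releaser, existed, err := Flock(filepath.Join(dataDir, "lock"))
//	if err != nil {
//		return err
//	}
//	defer releaser.Release()
//	if existed {
//		// The lock file was left behind by a previous process; the lock
//		// itself was still acquired successfully.
//	}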
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap_windows.go | tsdb/fileutil/mmap_windows.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"syscall"
"unsafe"
)
func mmap(f *os.File, size int) ([]byte, error) {
low, high := uint32(size), uint32(size>>32)
h, errno := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, high, low, nil)
if h == 0 {
return nil, os.NewSyscallError("CreateFileMapping", errno)
}
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(size))
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
return nil, os.NewSyscallError("CloseHandle", err)
}
if addr == 0 {
return nil, os.NewSyscallError("MapViewOfFile", errno)
}
return (*[maxMapSize]byte)(unsafe.Pointer(addr))[:size], nil
}
func munmap(b []byte) error {
if err := syscall.UnmapViewOfFile((uintptr)(unsafe.Pointer(&b[0]))); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err)
}
return nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/preallocate_linux.go | tsdb/fileutil/preallocate_linux.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"errors"
"os"
"syscall"
)
func preallocExtend(f *os.File, sizeInBytes int64) error {
// use mode = 0 to change size
err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
if err != nil {
var errno syscall.Errno
// not supported; fallback
// fallocate EINTRs frequently in some environments; fallback
if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
return preallocExtendTrunc(f, sizeInBytes)
}
}
return err
}
func preallocFixed(f *os.File, sizeInBytes int64) error {
// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
if err != nil {
var errno syscall.Errno
// treat not supported as nil error
if errors.As(err, &errno) && errno == syscall.ENOTSUP {
return nil
}
}
return err
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock_windows.go | tsdb/fileutil/flock_windows.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import "syscall"
type windowsLock struct {
fd syscall.Handle
}
func (fl *windowsLock) Release() error {
return syscall.Close(fl.fd)
}
func newLock(fileName string) (Releaser, error) {
pathp, err := syscall.UTF16PtrFromString(fileName)
if err != nil {
return nil, err
}
fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
if err != nil {
return nil, err
}
return &windowsLock{fd}, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/preallocate_other.go | tsdb/fileutil/preallocate_other.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !linux && !darwin
package fileutil
import "os"
func preallocExtend(f *os.File, sizeInBytes int64) error {
return preallocExtendTrunc(f, sizeInBytes)
}
func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap_unix.go | tsdb/fileutil/mmap_unix.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows && !plan9 && !js
package fileutil
import (
"os"
"golang.org/x/sys/unix"
)
func mmap(f *os.File, length int) ([]byte, error) {
return unix.Mmap(int(f.Fd()), 0, length, unix.PROT_READ, unix.MAP_SHARED)
}
func munmap(b []byte) (err error) {
return unix.Munmap(b)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/dir_windows.go | tsdb/fileutil/dir_windows.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package fileutil
import (
"os"
"syscall"
)
// OpenDir opens a directory in windows with write access for syncing.
func OpenDir(path string) (*os.File, error) {
fd, err := openDir(path)
if err != nil {
return nil, err
}
return os.NewFile(uintptr(fd), path), nil
}
func openDir(path string) (fd syscall.Handle, err error) {
if len(path) == 0 {
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
}
pathp, err := syscall.UTF16PtrFromString(path)
if err != nil {
return syscall.InvalidHandle, err
}
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
createmode := uint32(syscall.OPEN_EXISTING)
fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/direct_io.go | tsdb/fileutil/direct_io.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"bufio"
"errors"
"os"
)
var errDirectIOUnsupported = errors.New("direct IO is unsupported")
type BufWriter interface {
Write([]byte) (int, error)
Flush() error
Reset(f *os.File) error
}
// writer is a specialized wrapper around bufio.Writer.
// It is used when Direct IO isn't enabled, as using directIOWriter in such cases is impractical.
type writer struct {
*bufio.Writer
}
func (b *writer) Reset(f *os.File) error {
b.Writer.Reset(f)
return nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap_js.go | tsdb/fileutil/mmap_js.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build js
package fileutil
import (
"errors"
"os"
)
func mmap(f *os.File, length int) ([]byte, error) {
return nil, errors.New("unsupported")
}
func munmap(b []byte) (err error) {
return errors.New("unsupported")
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/sync_darwin.go | tsdb/fileutil/sync_darwin.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin
package fileutil
import (
"os"
)
// Fdatasync on the darwin platform relies on f.Sync(), which invokes
// fcntl(F_FULLFSYNC) for actual persistence on physical drive media.
func Fdatasync(f *os.File) error {
return f.Sync()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock_solaris.go | tsdb/fileutil/flock_solaris.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build solaris
package fileutil
import (
"os"
"syscall"
)
type unixLock struct {
f *os.File
}
func (l *unixLock) Release() error {
if err := l.set(false); err != nil {
return err
}
return l.f.Close()
}
func (l *unixLock) set(lock bool) error {
flock := syscall.Flock_t{
Type: syscall.F_UNLCK,
Start: 0,
Len: 0,
Whence: 1,
}
if lock {
flock.Type = syscall.F_WRLCK
}
return syscall.FcntlFlock(l.f.Fd(), syscall.F_SETLK, &flock)
}
func newLock(fileName string) (Releaser, error) {
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
if err != nil {
return nil, err
}
l := &unixLock{f}
err = l.set(true)
if err != nil {
f.Close()
return nil, err
}
return l, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/flock_plan9.go | tsdb/fileutil/flock_plan9.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import "os"
type plan9Lock struct {
f *os.File
}
func (l *plan9Lock) Release() error {
return l.f.Close()
}
func newLock(fileName string) (Releaser, error) {
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666)
if err != nil {
return nil, err
}
return &plan9Lock{f}, nil
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap_386.go | tsdb/fileutil/mmap_386.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package fileutil
const maxMapSize = 0x7FFFFFFF // 2GB
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/preallocate_darwin.go | tsdb/fileutil/preallocate_darwin.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"golang.org/x/sys/unix"
)
func preallocExtend(f *os.File, sizeInBytes int64) error {
if err := preallocFixed(f, sizeInBytes); err != nil {
return err
}
return preallocExtendTrunc(f, sizeInBytes)
}
func preallocFixed(f *os.File, sizeInBytes int64) error {
fstore := &unix.Fstore_t{
Flags: unix.F_ALLOCATEALL,
Posmode: unix.F_PEOFPOSMODE,
Length: sizeInBytes,
}
err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, fstore)
if err == nil || err == unix.ENOTSUP {
return nil
}
return err
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/dir_unix.go | tsdb/fileutil/dir_unix.go | // Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
package fileutil
import "os"
// OpenDir opens a directory for syncing.
func OpenDir(path string) (*os.File, error) { return os.Open(path) }
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/fileutil.go | tsdb/fileutil/fileutil.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileutil provides utility methods used when dealing with the filesystem in tsdb.
// It is largely copied from github.com/coreos/etcd/pkg/fileutil to avoid the
// dependency chain it brings with it.
// Please check github.com/coreos/etcd for licensing information.
package fileutil
import (
"os"
"path/filepath"
"strings"
)
// CopyDirs copies all directories, subdirectories and files recursively including the empty folders.
// Source and destination must be full paths.
func CopyDirs(src, dest string) error {
if err := os.MkdirAll(dest, 0o777); err != nil {
return err
}
files, err := readDirs(src)
if err != nil {
return err
}
for _, f := range files {
dp := filepath.Join(dest, f)
sp := filepath.Join(src, f)
stat, err := os.Stat(sp)
if err != nil {
return err
}
// Empty directories are also created.
if stat.IsDir() {
if err := os.MkdirAll(dp, 0o777); err != nil {
return err
}
continue
}
if err := copyFile(sp, dp); err != nil {
return err
}
}
return nil
}
func copyFile(src, dest string) error {
data, err := os.ReadFile(src)
if err != nil {
return err
}
err = os.WriteFile(dest, data, 0o666)
if err != nil {
return err
}
return nil
}
// readDirs reads the source directory recursively and
// returns relative paths to all files and empty directories.
func readDirs(src string) ([]string, error) {
var files []string
err := filepath.Walk(src, func(path string, _ os.FileInfo, _ error) error {
relativePath := strings.TrimPrefix(path, src)
if len(relativePath) > 0 {
files = append(files, relativePath)
}
return nil
})
if err != nil {
return nil, err
}
return files, nil
}
// Rename safely renames a file.
func Rename(from, to string) error {
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = pdir.Sync(); err != nil {
pdir.Close()
return err
}
return pdir.Close()
}
// Replace moves a file or directory to a new location and deletes any previous data.
// It is not atomic.
func Replace(from, to string) error {
// Remove the destination only if it is a directory; otherwise leave it to
// os.Rename, which replaces the destination file and is atomic.
{
f, err := os.Stat(to)
if !os.IsNotExist(err) {
if err == nil && f.IsDir() {
if err := os.RemoveAll(to); err != nil {
return err
}
}
}
}
return Rename(from, to)
}
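// A minimal usage sketch (writeBlock and the paths are illustrative): stage
// new data in a temporary directory, then move it into place. Note that when
// the destination is a directory, it is removed before the rename, so the
// swap is not atomic.
//
//	tmp := dst + ".tmp"
//	if err := writeBlock(tmp); err != nil {
//		return err
//	}
//	return Replace(tmp, dst)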
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/direct_io_force.go | tsdb/fileutil/direct_io_force.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This allows seamless testing of the Direct I/O writer across all tsdb tests.
//go:build linux && forcedirectio
package fileutil
import "os"
func NewDirectIOWriter(f *os.File, size int) (BufWriter, error) {
return newDirectIOWriter(f, size)
}
func NewBufioWriterWithSize(f *os.File, size int) (BufWriter, error) {
return NewDirectIOWriter(f, size)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/direct_io_linux.go | tsdb/fileutil/direct_io_linux.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux && !forcedirectio
package fileutil
import (
"bufio"
"os"
)
func NewBufioWriterWithSize(f *os.File, size int) (BufWriter, error) {
return &writer{bufio.NewWriterSize(f, size)}, nil
}
func NewDirectIOWriter(f *os.File, size int) (BufWriter, error) {
return newDirectIOWriter(f, size)
}
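// A minimal usage sketch (f, data, and the buffer size are illustrative):
// callers program against BufWriter, so swapping this constructor for
// NewDirectIOWriter (as the forcedirectio build does) requires no call-site
// changes.
//
//	w, err := NewBufioWriterWithSize(f, 4*1024*1024)
//	if err != nil {
//		return err
//	}
//	if _, err := w.Write(data); err != nil {
//		return err
//	}
//	return w.Flush()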
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/dir.go | tsdb/fileutil/dir.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path/filepath"
)
func DirSize(dir string) (int64, error) {
var size int64
err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
if err != nil {
// Ignore missing files that may have been deleted during the walk.
if os.IsNotExist(err) {
return nil
}
return err
}
if !info.IsDir() {
size += info.Size()
}
return nil
})
return size, err
}
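// A minimal usage sketch (the directory name is illustrative; the caller
// needs the log package): report the on-disk size of a TSDB directory.
// Files deleted while walking are tolerated rather than surfaced as errors.
//
//	size, err := DirSize("data")
//	if err != nil {
//		return err
//	}
//	log.Printf("tsdb dir uses %d bytes", size)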
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/fileutil/mmap_amd64.go | tsdb/fileutil/mmap_amd64.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package fileutil
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/goversion/init.go | tsdb/goversion/init.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package goversion
// This will fail to compile if the Go runtime version isn't >= 1.12.
var _ = _SoftwareRequiresGOVERSION1_12
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/goversion/goversion_test.go | tsdb/goversion/goversion_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package goversion_test
import (
"testing"
_ "github.com/prometheus/prometheus/tsdb/goversion"
)
// This test is intentionally blank and exists only so `go test` believes
// there is something to test.
//
// The blank import above is actually what invokes the test of this package. If
// the import succeeds (the code compiles), the test passes.
func Test(*testing.T) {}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/goversion/goversion.go | tsdb/goversion/goversion.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.12
// Package goversion enforces the go version supported by the tsdb module.
package goversion
const _SoftwareRequiresGOVERSION1_12 = uint8(0)
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/agent/series.go | tsdb/agent/series.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"sync"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
)
// memSeries is a chunkless version of tsdb.memSeries.
type memSeries struct {
sync.Mutex
ref chunks.HeadSeriesRef
lset labels.Labels
// Last recorded timestamp. Used by Storage.gc to determine if a series is
// stale.
lastTs int64
}
// updateTimestamp obtains the lock on s and attempts to update lastTs.
// It returns false if newTs < lastTs.
func (m *memSeries) updateTimestamp(newTs int64) bool {
m.Lock()
defer m.Unlock()
if newTs >= m.lastTs {
m.lastTs = newTs
return true
}
return false
}
// seriesHashmap lets the agent find a memSeries by its label set, via a 64-bit hash.
// There is one map for the common case where the hash value is unique, and a
// second map for the case that two series have the same hash value.
// Each series is in only one of the maps. Its methods require the hash to be
// submitted with the label set to avoid re-computing the hash throughout the code.
type seriesHashmap struct {
unique map[uint64]*memSeries
conflicts map[uint64][]*memSeries
}
func (m *seriesHashmap) Get(hash uint64, lset labels.Labels) *memSeries {
if s, found := m.unique[hash]; found {
if labels.Equal(s.lset, lset) {
return s
}
}
for _, s := range m.conflicts[hash] {
if labels.Equal(s.lset, lset) {
return s
}
}
return nil
}
func (m *seriesHashmap) Set(hash uint64, s *memSeries) {
if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) {
m.unique[hash] = s
return
}
if m.conflicts == nil {
m.conflicts = make(map[uint64][]*memSeries)
}
seriesSet := m.conflicts[hash]
for i, prev := range seriesSet {
if labels.Equal(prev.lset, s.lset) {
seriesSet[i] = s
return
}
}
m.conflicts[hash] = append(seriesSet, s)
}
func (m *seriesHashmap) Delete(hash uint64, ref chunks.HeadSeriesRef) {
var rem []*memSeries
unique, found := m.unique[hash]
switch {
case !found: // Supplied hash is not stored.
return
case unique.ref == ref:
conflicts := m.conflicts[hash]
if len(conflicts) == 0 { // Exactly one series with this hash was stored
delete(m.unique, hash)
return
}
m.unique[hash] = conflicts[0] // First remaining series goes in 'unique'.
rem = conflicts[1:] // Keep the rest.
default: // The series to delete is somewhere in 'conflicts'. Keep all the ones that don't match.
for _, s := range m.conflicts[hash] {
if s.ref != ref {
rem = append(rem, s)
}
}
}
if len(rem) == 0 {
delete(m.conflicts, hash)
} else {
m.conflicts[hash] = rem
}
}
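// A minimal usage sketch (the label set and ref are illustrative): the
// caller computes the hash once and passes it to every method.
//
//	m := seriesHashmap{unique: map[uint64]*memSeries{}}
//	lset := labels.FromStrings("job", "node")
//	m.Set(lset.Hash(), &memSeries{ref: 7, lset: lset})
//	if got := m.Get(lset.Hash(), lset); got != nil {
//		m.Delete(lset.Hash(), got.ref)
//	}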
// stripeSeries locks modulo ranges of IDs and hashes to reduce lock
// contention. The locks are padded to not be on the same cache line.
// Filling the padded space with the maps was profiled to be slower -
// likely due to the additional pointer dereferences.
type stripeSeries struct {
size int
series []map[chunks.HeadSeriesRef]*memSeries
hashes []seriesHashmap
exemplars []map[chunks.HeadSeriesRef]*exemplar.Exemplar
locks []stripeLock
gcMut sync.Mutex
}
type stripeLock struct {
sync.RWMutex
// Padding to avoid multiple locks being on the same cache line.
_ [40]byte
}
func newStripeSeries(stripeSize int) *stripeSeries {
s := &stripeSeries{
size: stripeSize,
series: make([]map[chunks.HeadSeriesRef]*memSeries, stripeSize),
hashes: make([]seriesHashmap, stripeSize),
exemplars: make([]map[chunks.HeadSeriesRef]*exemplar.Exemplar, stripeSize),
locks: make([]stripeLock, stripeSize),
}
for i := range s.series {
s.series[i] = map[chunks.HeadSeriesRef]*memSeries{}
}
for i := range s.hashes {
s.hashes[i] = seriesHashmap{
unique: map[uint64]*memSeries{},
conflicts: nil, // Initialized on demand in set().
}
}
for i := range s.exemplars {
s.exemplars[i] = map[chunks.HeadSeriesRef]*exemplar.Exemplar{}
}
return s
}
// GC garbage collects old series that have not received a sample at or after
// mint and fully deletes them.
func (s *stripeSeries) GC(mint int64) map[chunks.HeadSeriesRef]struct{} {
// NOTE(rfratto): GC will grab two locks, one for the hash and the other for
// series. It's not valid for any other function to grab both locks,
// otherwise a deadlock might occur when running GC in parallel with
// appending.
s.gcMut.Lock()
defer s.gcMut.Unlock()
deleted := map[chunks.HeadSeriesRef]struct{}{}
// check inspects one series: if it has not received a write since mint, it is marked as deleted and its ID is collected.
check := func(hashLock int, hash uint64, series *memSeries) {
series.Lock()
// Any series that has received a write since mint is still alive.
if series.lastTs >= mint {
series.Unlock()
return
}
// The series is stale. We need to obtain a second lock for the
// ref if it's different than the hash lock.
refLock := int(series.ref) & (s.size - 1)
if hashLock != refLock {
s.locks[refLock].Lock()
}
deleted[series.ref] = struct{}{}
delete(s.series[refLock], series.ref)
s.hashes[hashLock].Delete(hash, series.ref)
// Since the series is gone, we'll also delete
// the latest stored exemplar.
delete(s.exemplars[refLock], series.ref)
if hashLock != refLock {
s.locks[refLock].Unlock()
}
series.Unlock()
}
for hashLock := 0; hashLock < s.size; hashLock++ {
s.locks[hashLock].Lock()
for hash, all := range s.hashes[hashLock].conflicts {
for _, series := range all {
check(hashLock, hash, series)
}
}
for hash, series := range s.hashes[hashLock].unique {
check(hashLock, hash, series)
}
s.locks[hashLock].Unlock()
}
return deleted
}
func (s *stripeSeries) GetByID(id chunks.HeadSeriesRef) *memSeries {
refLock := uint64(id) & uint64(s.size-1)
s.locks[refLock].RLock()
defer s.locks[refLock].RUnlock()
return s.series[refLock][id]
}
func (s *stripeSeries) GetByHash(hash uint64, lset labels.Labels) *memSeries {
hashLock := hash & uint64(s.size-1)
s.locks[hashLock].RLock()
defer s.locks[hashLock].RUnlock()
return s.hashes[hashLock].Get(hash, lset)
}
func (s *stripeSeries) Set(hash uint64, series *memSeries) {
var (
hashLock = hash & uint64(s.size-1)
refLock = uint64(series.ref) & uint64(s.size-1)
)
// We can't hold both locks at once otherwise we might deadlock with a
// simultaneous call to GC.
//
// We update s.series first because GC expects anything in s.hashes to
// already exist in s.series.
s.locks[refLock].Lock()
s.series[refLock][series.ref] = series
s.locks[refLock].Unlock()
s.locks[hashLock].Lock()
s.hashes[hashLock].Set(hash, series)
s.locks[hashLock].Unlock()
}
func (s *stripeSeries) GetLatestExemplar(ref chunks.HeadSeriesRef) *exemplar.Exemplar {
i := uint64(ref) & uint64(s.size-1)
s.locks[i].RLock()
exemplar := s.exemplars[i][ref]
s.locks[i].RUnlock()
return exemplar
}
func (s *stripeSeries) SetLatestExemplar(ref chunks.HeadSeriesRef, exemplar *exemplar.Exemplar) {
i := uint64(ref) & uint64(s.size-1)
// Record the latest exemplar only if the series ID is still valid.
s.locks[i].Lock()
if s.series[i][ref] != nil {
s.exemplars[i][ref] = exemplar
}
s.locks[i].Unlock()
}
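// A minimal usage sketch (the stripe count and labels are illustrative):
// because lock selection masks the hash with size-1, stripeSize must be a
// power of two.
//
//	ss := newStripeSeries(16)
//	lset := labels.FromStrings("__name__", "up")
//	ss.Set(lset.Hash(), &memSeries{ref: 1, lset: lset})
//	if got := ss.GetByHash(lset.Hash(), lset); got != nil {
//		got.updateTimestamp(1000)
//	}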
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/agent/db_append_v2.go | tsdb/agent/db_append_v2.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"context"
"errors"
"fmt"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
)
// AppenderV2 implements storage.AppenderV2.
func (db *DB) AppenderV2(context.Context) storage.AppenderV2 {
return db.appenderV2Pool.Get().(storage.AppenderV2)
}
type appenderV2 struct {
appenderBase
}
// Append appends pending sample to agent's DB.
// TODO: Wire metadata in the Agent's appender.
func (a *appenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
var (
// Avoid shadowing err variables for reliability.
valErr, partialErr error
sampleMetricType = sampleMetricTypeFloat
isStale bool
)
// Fail fast on incorrect histograms.
switch {
case fh != nil:
sampleMetricType = sampleMetricTypeHistogram
valErr = fh.Validate()
case h != nil:
sampleMetricType = sampleMetricTypeHistogram
valErr = h.Validate()
}
if valErr != nil {
return 0, valErr
}
// Series references and chunk references are identical in agent mode.
s := a.series.GetByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
s, err = a.getOrCreate(ls)
if err != nil {
return 0, err
}
}
s.Lock()
lastTS := s.lastTs
s.Unlock()
// TODO(bwplotka): Handle ST natively (as per PROM-60).
if a.opts.EnableSTAsZeroSample && st != 0 {
a.bestEffortAppendSTZeroSample(s, ls, lastTS, st, t, h, fh)
}
if t <= a.minValidTime(lastTS) {
a.metrics.totalOutOfOrderSamples.Inc()
return 0, storage.ErrOutOfOrderSample
}
switch {
case fh != nil:
isStale = value.IsStaleNaN(fh.Sum)
// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
Ref: s.ref,
T: t,
FH: fh,
})
a.floatHistogramSeries = append(a.floatHistogramSeries, s)
case h != nil:
isStale = value.IsStaleNaN(h.Sum)
// NOTE: always modify pendingHistograms and histogramSeries together
a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
Ref: s.ref,
T: t,
H: h,
})
a.histogramSeries = append(a.histogramSeries, s)
default:
isStale = value.IsStaleNaN(v)
// NOTE: always modify pendingSamples and sampleSeries together.
a.pendingSamples = append(a.pendingSamples, record.RefSample{
Ref: s.ref,
T: t,
V: v,
})
a.sampleSeries = append(a.sampleSeries, s)
}
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricType).Inc()
if isStale {
// For stale values we never attempt to process metadata/exemplars; claim success.
return storage.SeriesRef(s.ref), nil
}
// Append exemplars if any and if storage was configured for it.
// TODO(bwplotka): Agent does not have equivalent of a.head.opts.EnableExemplarStorage && a.head.opts.MaxExemplars.Load() > 0 ?
if len(opts.Exemplars) > 0 {
// Currently only exemplars can return partial errors.
partialErr = a.appendExemplars(s, opts.Exemplars)
}
return storage.SeriesRef(s.ref), partialErr
}
func (a *appenderV2) appendExemplars(s *memSeries, exemplar []exemplar.Exemplar) error {
var errs []error
for _, e := range exemplar {
// Ensure no empty labels have gotten through.
e.Labels = e.Labels.WithoutEmpty()
if err := a.validateExemplar(s.ref, e); err != nil {
if !errors.Is(err, storage.ErrDuplicateExemplar) {
// Return partial errors for everything except duplicates.
errs = append(errs, err)
continue
}
if !errors.Is(err, storage.ErrOutOfOrderExemplar) {
a.logger.Debug("Error while adding an exemplar on AppendSample", "exemplars", fmt.Sprintf("%+v", e), "err", e)
}
continue
}
a.series.SetLatestExemplar(s.ref, &e)
a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{
Ref: s.ref,
T: e.Ts,
V: e.Value,
Labels: e.Labels,
})
a.metrics.totalAppendedExemplars.Inc()
}
if len(errs) > 0 {
return &storage.AppendPartialError{ExemplarErrors: errs}
}
return nil
}
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
//
// ST is an experimental feature; we don't fail the append on errors, we just log them at debug level.
func (a *appenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.Labels, lastTS, st, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) {
// NOTE: Use ls instead of s.lset to avoid locking memSeries. Using s.ref is acceptable without locking.
if st >= t {
a.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrSTNewerThanSample)
return
}
if st <= lastTS {
a.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrOutOfOrderST)
return
}
switch {
case fh != nil:
zeroFloatHistogram := &histogram.FloatHistogram{
// The STZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
CustomValues: fh.CustomValues,
}
a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{Ref: s.ref, T: st, FH: zeroFloatHistogram})
a.floatHistogramSeries = append(a.floatHistogramSeries, s)
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
case h != nil:
zeroHistogram := &histogram.Histogram{
// The STZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
CustomValues: h.CustomValues,
}
a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{Ref: s.ref, T: st, H: zeroHistogram})
a.histogramSeries = append(a.histogramSeries, s)
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
default:
a.pendingSamples = append(a.pendingSamples, record.RefSample{Ref: s.ref, T: st, V: 0})
a.sampleSeries = append(a.sampleSeries, s)
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/agent/db_test.go | tsdb/agent/db_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"context"
"errors"
"fmt"
"io"
"math"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/testutil"
)
func TestDB_InvalidSeries(t *testing.T) {
s := createTestAgentDB(t, nil, DefaultOptions())
defer s.Close()
app := s.Appender(context.Background())
t.Run("Samples", func(t *testing.T) {
_, err := app.Append(0, labels.Labels{}, 0, 0)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
_, err = app.Append(0, labels.FromStrings("a", "1", "a", "2"), 0, 0)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})
t.Run("Histograms", func(t *testing.T) {
_, err := app.AppendHistogram(0, labels.Labels{}, 0, tsdbutil.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
_, err = app.AppendHistogram(0, labels.FromStrings("a", "1", "a", "2"), 0, tsdbutil.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})
t.Run("Exemplars", func(t *testing.T) {
sRef, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0)
require.NoError(t, err, "should not reject valid series")
_, err = app.AppendExemplar(0, labels.EmptyLabels(), exemplar.Exemplar{})
require.EqualError(t, err, "unknown series ref when trying to add exemplar: 0")
e := exemplar.Exemplar{Labels: labels.FromStrings("a", "1", "a", "2")}
_, err = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
require.ErrorIs(t, err, tsdb.ErrInvalidExemplar, "should reject duplicate labels")
e = exemplar.Exemplar{Labels: labels.FromStrings("a_somewhat_long_trace_id", "nYJSNtFrFTY37VR7mHzEE/LIDt7cdAQcuOzFajgmLDAdBSRHYPDzrxhMA4zz7el8naI/AoXFv9/e/G0vcETcIoNUi3OieeLfaIRQci2oa")}
_, err = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
require.ErrorIs(t, err, storage.ErrExemplarLabelLength, "should reject too long label length")
// Inverse check.
e = exemplar.Exemplar{Labels: labels.FromStrings("a", "1"), Value: 20, Ts: 10, HasTs: true}
_, err = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
require.NoError(t, err, "should not reject valid exemplars")
})
}
func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) *DB {
t.Helper()
dbDir := t.TempDir()
rs := remote.NewStorage(promslog.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
db, err := Open(promslog.NewNopLogger(), reg, rs, dbDir, opts)
require.NoError(t, err)
return db
}
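// Usage sketch (hypothetical, not part of the original file): a typical test
// obtains a DB via createTestAgentDB and relies on t.Cleanup for the remote
// storage; only the DB itself still needs an explicit Close.
func exampleCreateTestAgentDB(t *testing.T) {
	reg := prometheus.NewRegistry()
	db := createTestAgentDB(t, reg, DefaultOptions())
	defer func() { require.NoError(t, db.Close()) }()
	app := db.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("job", "example"), 0, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
}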
func TestUnsupportedFunctions(t *testing.T) {
s := createTestAgentDB(t, nil, DefaultOptions())
defer s.Close()
t.Run("Querier", func(t *testing.T) {
_, err := s.Querier(0, 0)
require.Equal(t, err, ErrUnsupported)
})
t.Run("ChunkQuerier", func(t *testing.T) {
_, err := s.ChunkQuerier(0, 0)
require.Equal(t, err, ErrUnsupported)
})
t.Run("ExemplarQuerier", func(t *testing.T) {
_, err := s.ExemplarQuerier(context.TODO())
require.Equal(t, err, ErrUnsupported)
})
}
func TestCommit(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.Appender(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for i := range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
ref, err := app.Append(0, lset, sample[0].T(), sample[0].F())
require.NoError(t, err)
e := exemplar.Exemplar{
Labels: lset,
Ts: sample[0].T() + int64(i),
Value: sample[0].F(),
HasTs: true,
}
_, err = app.AppendExemplar(ref, lset, e)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), customBucketHistograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), nil, customBucketFloatHistograms[i])
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series, samples, and exemplars.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
default:
}
}
// Check that the WAL contained the same number of committed series/samples/exemplars.
require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series")
require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
}
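// Hypothetical refactoring sketch (not part of the original file): the
// WAL-decoding switch repeated by TestCommit and TestRollback could be
// factored into one helper. walCounts and countWALRecords are assumed names;
// the decoder calls mirror the ones used above.
type walCounts struct {
	series, samples, exemplars, histograms, floatHistograms int
}

func countWALRecords(t *testing.T, walDir string) walCounts {
	t.Helper()
	sr, err := wlog.NewSegmentsReader(walDir)
	require.NoError(t, err)
	defer func() { require.NoError(t, sr.Close()) }()
	var (
		c   walCounts
		r   = wlog.NewReader(sr)
		dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
	)
	for r.Next() {
		rec := r.Record()
		switch dec.Type(rec) {
		case record.Series:
			s, err := dec.Series(rec, nil)
			require.NoError(t, err)
			c.series += len(s)
		case record.Samples:
			s, err := dec.Samples(rec, nil)
			require.NoError(t, err)
			c.samples += len(s)
		case record.Exemplars:
			e, err := dec.Exemplars(rec, nil)
			require.NoError(t, err)
			c.exemplars += len(e)
		case record.HistogramSamples, record.CustomBucketsHistogramSamples:
			h, err := dec.HistogramSamples(rec, nil)
			require.NoError(t, err)
			c.histograms += len(h)
		case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
			fh, err := dec.FloatHistogramSamples(rec, nil)
			require.NoError(t, err)
			c.floatHistograms += len(fh)
		}
	}
	return c
}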
func TestRollback(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.Appender(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, sample[0].T(), sample[0].F())
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
// Do a rollback, which should clear uncommitted data. A follow-up call to
// commit should persist nothing to the WAL.
require.NoError(t, app.Rollback())
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
}
}
// Check that only series get stored after calling Rollback.
require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}
func TestFullTruncateWAL(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 800
lastTs = 500
)
reg := prometheus.NewRegistry()
opts := DefaultOptions()
opts.TruncateFrequency = time.Minute * 2
s := createTestAgentDB(t, reg, opts)
defer func() {
require.NoError(t, s.Close())
}()
app := s.Appender(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, int64(lastTs), 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Truncate the WAL with a mint that GCs all the samples.
s.truncate(lastTs + 1)
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
}
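// Hypothetical assertion helper (not part of the original file): wraps the
// gatherFamily-plus-gauge check that the truncation tests above repeat. The
// expected value is numSeries per batch times the five batches appended
// (floats plus four histogram flavors).
func requireGaugeValue(t *testing.T, reg prometheus.Gatherer, family string, want float64, msg string) {
	t.Helper()
	m := gatherFamily(t, reg, family)
	require.Equal(t, want, m.Metric[0].Gauge.GetValue(), msg)
}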
func TestPartialTruncateWAL(t *testing.T) {
const (
numDatapoints = 1000
numSeries = 800
)
opts := DefaultOptions()
reg := prometheus.NewRegistry()
s := createTestAgentDB(t, reg, opts)
defer func() {
require.NoError(t, s.Close())
}()
app := s.Appender(context.TODO())
// Create the first batch of 800 series, each with 1000 data points, at a fixed lastTs of 500.
var lastTs int64 = 500
lbls := labelsForTest(t.Name()+"batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, lastTs, 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Create the second batch of 800 series, each with 1000 data points, at a fixed lastTs of 600.
lastTs = 600
lbls = labelsForTest(t.Name()+"batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, lastTs, 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Truncate the WAL with a mint that GCs only the first batch of 800 series while retaining the second batch of 800 series.
s.truncate(lastTs - 1)
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
require.Len(t, m.Metric, 1)
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
}
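// Sketch (not part of the original file): the GC rule this test exercises. A
// series is dropped by truncate(mint) when its last appended timestamp falls
// before mint, so truncate(600-1) deletes the batch written at ts=500 and
// keeps the batch written at ts=600.
func seriesIsStale(lastTs, mint int64) bool {
	return lastTs < mint
}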
func TestWALReplay(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
lastTs = 500
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.Appender(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, lastTs, 0)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
// Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
// We need the original directory so we can recreate the storage for replay.
storageDir := filepath.Dir(s.wal.Dir())
reg := prometheus.NewRegistry()
replayStorage, err := Open(s.logger, reg, nil, storageDir, s.opts)
if err != nil {
t.Fatalf("unable to create storage for the agent: %v", err)
}
defer func() {
require.NoError(t, replayStorage.Close())
}()
// Check if all the series are retrieved back from the WAL.
m := gatherFamily(t, reg, "prometheus_agent_active_series")
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
// Check if lastTs of the samples retrieved from the WAL is retained.
metrics := replayStorage.series.series
for i := range metrics {
mp := metrics[i]
for _, v := range mp {
require.Equal(t, v.lastTs, int64(lastTs))
}
}
}
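// Hypothetical helper (not part of the original file): the replay pattern
// used above. s.wal.Dir() is <storage>/wal, so its parent is the storage
// root that Open expects when recreating the agent DB.
func reopenForReplay(t *testing.T, s *DB, reg prometheus.Registerer) *DB {
	t.Helper()
	storageDir := filepath.Dir(s.wal.Dir())
	db, err := Open(s.logger, reg, nil, storageDir, s.opts)
	require.NoError(t, err)
	return db
}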
func TestLockfile(t *testing.T) {
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
logger := promslog.NewNopLogger()
reg := prometheus.NewRegistry()
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
opts := DefaultOptions()
opts.NoLockfile = !createLock
// Create the DB. This should create lockfile and its metrics.
db, err := Open(logger, nil, rs, data, opts)
require.NoError(t, err)
return db.locker, testutil.NewCallbackCloser(func() {
require.NoError(t, db.Close())
})
})
}
func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir()
rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
defer func() {
require.NoError(t, rs.Close())
}()
db, err := Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
seriesCount := 10
// Append <seriesCount> series
app := db.Appender(context.Background())
for i := range seriesCount {
lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("series_%d", i))
_, err := app.Append(0, lset, 0, 100)
require.NoError(t, err)
}
histogramCount := 10
histograms := tsdbutil.GenerateTestHistograms(histogramCount)
// Append <histogramCount> histogram series
for i := range histogramCount {
lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("histogram_%d", i))
_, err := app.AppendHistogram(0, lset, 0, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Truncate the WAL to force creation of a new segment.
require.NoError(t, db.truncate(0))
require.NoError(t, db.Close())
// Create a new storage and see what nextRef is initialized to.
db, err = Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
}()
require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
}
func Test_validateOptions(t *testing.T) {
t.Run("Apply defaults to zero values", func(t *testing.T) {
opts := validateOptions(&Options{})
require.Equal(t, DefaultOptions(), opts)
})
t.Run("Defaults are already valid", func(t *testing.T) {
require.Equal(t, DefaultOptions(), validateOptions(nil))
})
t.Run("MaxWALTime should not be lower than TruncateFrequency", func(t *testing.T) {
opts := validateOptions(&Options{
MaxWALTime: int64(time.Hour / time.Millisecond),
TruncateFrequency: 2 * time.Hour,
})
require.Equal(t, int64(2*time.Hour/time.Millisecond), opts.MaxWALTime)
})
}
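// Sketch (not part of the original file): the clamping invariant the subtest
// above asserts, stated as code. MaxWALTime is stored in milliseconds and
// must cover at least one truncation interval.
func minMaxWALTime(truncateFrequency time.Duration) int64 {
	return int64(truncateFrequency / time.Millisecond)
}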
func startTime() (int64, error) {
return time.Now().Unix() * 1000, nil
}
// Create series for tests.
func labelsForTest(lName string, seriesCount int) [][]labels.Label {
var series [][]labels.Label
for i := range seriesCount {
lset := []labels.Label{
{Name: "a", Value: lName},
{Name: "instance", Value: "localhost" + strconv.Itoa(i)},
{Name: "job", Value: "prometheus"},
}
series = append(series, lset)
}
return series
}
func gatherFamily(t *testing.T, reg prometheus.Gatherer, familyName string) *dto.MetricFamily {
t.Helper()
families, err := reg.Gather()
require.NoError(t, err, "failed to gather metrics")
for _, f := range families {
if f.GetName() == familyName {
return f
}
}
t.Fatalf("could not find family %s", familyName)
return nil
}
func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.Appender(context.Background())
defer s.Close()
sRef, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0)
require.NoError(t, err, "should not reject valid series")
// Write a few exemplars to our appender and call Commit().
// If the Labels, Value, or Timestamp differ from the last exemplar,
// a new one should be appended; otherwise, it should be skipped.
e := exemplar.Exemplar{Labels: labels.FromStrings("a", "1"), Value: 20, Ts: 10, HasTs: true}
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
e.Labels = labels.FromStrings("b", "2")
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
e.Value = 42
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
e.Ts = 25
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
_, _ = app.AppendExemplar(sRef, labels.EmptyLabels(), e)
require.NoError(t, app.Commit())
// Read back what was written to the WAL.
var walExemplarsCount int
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer sr.Close()
r := wlog.NewReader(sr)
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
for r.Next() {
rec := r.Record()
if dec.Type(rec) == record.Exemplars {
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
}
}
// We had 9 calls to AppendExemplar but only 4 of those should have gotten through.
require.Equal(t, 4, walExemplarsCount)
}
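// Sketch (hypothetical, not part of the original file): the sequential
// deduplication rule the test above relies on. An exemplar is skipped only
// when labels, value, and timestamp all match the previously accepted one.
func exemplarIsDuplicate(prev, next exemplar.Exemplar) bool {
	return labels.Equal(prev.Labels, next.Labels) &&
		prev.Value == next.Value &&
		prev.Ts == next.Ts
}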
func TestDBAllowOOOSamples(t *testing.T) {
const (
numDatapoints = 5
numHistograms = 5
numSeries = 4
offset = 100
)
reg := prometheus.NewRegistry()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = math.MaxInt64
s := createTestAgentDB(t, reg, opts)
app := s.Appender(context.TODO())
// Let's add some samples in the [offset, offset+numDatapoints) range.
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for i := offset; i < numDatapoints+offset; i++ {
ref, err := app.Append(0, lset, int64(i), float64(i))
require.NoError(t, err)
e := exemplar.Exemplar{
Labels: lset,
Ts: int64(i) * 2,
Value: float64(i),
HasTs: true,
}
_, err = app.AppendExemplar(ref, lset, e)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i-offset], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i-offset], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i-offset])
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i-offset])
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
require.Equal(t, float64(20), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
require.NoError(t, s.Close())
// Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
// We need the original directory so we can recreate the storage for replay.
storageDir := filepath.Dir(s.wal.Dir())
// Replay the storage so that the lastTs for each series is recorded.
reg2 := prometheus.NewRegistry()
db, err := Open(s.logger, reg2, nil, storageDir, s.opts)
if err != nil {
t.Fatalf("unable to create storage for the agent: %v", err)
}
app = db.Appender(context.Background())
// Now the lastTs will have been recorded successfully.
// Let's try appending twice as many OOO samples in the [0, numDatapoints) range.
lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
for i := range numDatapoints {
ref, err := app.Append(0, lset, int64(i), float64(i))
require.NoError(t, err)
e := exemplar.Exemplar{
Labels: lset,
Ts: int64(i) * 2,
Value: float64(i),
HasTs: true,
}
_, err = app.AppendExemplar(ref, lset, e)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/agent/db_append_v2_test.go | tsdb/agent/db_append_v2_test.go |
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"context"
"fmt"
"math"
"path/filepath"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/testutil"
)
func TestDB_InvalidSeries_AppendV2(t *testing.T) {
s := createTestAgentDB(t, nil, DefaultOptions())
defer s.Close()
app := s.AppenderV2(context.Background())
t.Run("Samples", func(t *testing.T) {
_, err := app.Append(0, labels.Labels{}, 0, 0, 0, nil, nil, storage.AOptions{})
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
_, err = app.Append(0, labels.FromStrings("a", "1", "a", "2"), 0, 0, 0, nil, nil, storage.AOptions{})
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})
t.Run("Histograms", func(t *testing.T) {
_, err := app.Append(0, labels.Labels{}, 0, 0, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
_, err = app.Append(0, labels.FromStrings("a", "1", "a", "2"), 0, 0, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})
t.Run("Exemplars", func(t *testing.T) {
e := exemplar.Exemplar{Labels: labels.FromStrings("a", "1", "a", "2")}
_, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0, 0, nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{e},
})
partErr := &storage.AppendPartialError{}
require.ErrorAs(t, err, &partErr)
require.Len(t, partErr.ExemplarErrors, 1)
require.ErrorIs(t, partErr.ExemplarErrors[0], tsdb.ErrInvalidExemplar, "should reject duplicate labels")
e = exemplar.Exemplar{Labels: labels.FromStrings("a_somewhat_long_trace_id", "nYJSNtFrFTY37VR7mHzEE/LIDt7cdAQcuOzFajgmLDAdBSRHYPDzrxhMA4zz7el8naI/AoXFv9/e/G0vcETcIoNUi3OieeLfaIRQci2oa")}
_, err = app.Append(0, labels.FromStrings("a", "2"), 0, 0, 0, nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{e},
})
partErr = &storage.AppendPartialError{}
require.ErrorAs(t, err, &partErr)
require.Len(t, partErr.ExemplarErrors, 1)
require.ErrorIs(t, partErr.ExemplarErrors[0], storage.ErrExemplarLabelLength, "should reject too long label length")
// Inverse check.
e = exemplar.Exemplar{Labels: labels.FromStrings("a", "1"), Value: 20, Ts: 10, HasTs: true}
_, err = app.Append(0, labels.FromStrings("a", "1"), 0, 0, 0, nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{e},
})
require.NoError(t, err, "should not reject valid exemplars")
})
}
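// Readability sketch (not part of the original file): the positional
// AppenderV2 Append calls in this file follow the shape below. The interface
// and parameter names here are assumptions inferred from the call sites, not
// the real storage interface.
type v2Appender interface {
	Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64,
		h *histogram.Histogram, fh *histogram.FloatHistogram,
		opts storage.AOptions) (storage.SeriesRef, error)
}

// appendFloatV2 makes the "plain float sample" case explicit: no start
// timestamp, no histograms, default options.
func appendFloatV2(app v2Appender, ls labels.Labels, ts int64, v float64) (storage.SeriesRef, error) {
	return app.Append(0, ls, 0, ts, v, nil, nil, storage.AOptions{})
}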
func TestCommit_AppendV2(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for i := range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, 0, sample[0].T(), sample[0].F(), nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{{
Labels: lset,
Ts: sample[0].T() + int64(i),
Value: sample[0].F(),
HasTs: true,
}},
})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, customBucketHistograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, customBucketFloatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series, samples, and exemplars.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
default:
}
}
// Check that the WAL contained the same number of committed series/samples/exemplars.
require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series")
require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
}
func TestRollback_AppendV2(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, 0, sample[0].T(), sample[0].F(), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
// Do a rollback, which should clear uncommitted data. A follow-up call to
// commit should persist nothing to the WAL.
require.NoError(t, app.Rollback())
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
}
}
// Check that only series get stored after calling Rollback.
require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}
func TestFullTruncateWAL_AppendV2(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 800
lastTs = 500
)
reg := prometheus.NewRegistry()
opts := DefaultOptions()
opts.TruncateFrequency = time.Minute * 2
s := createTestAgentDB(t, reg, opts)
defer func() {
require.NoError(t, s.Close())
}()
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, 0, int64(lastTs), 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(lastTs), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(lastTs), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(lastTs), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(lastTs), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Truncate the WAL with a mint that GCs all the samples.
s.truncate(lastTs + 1)
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
}
func TestPartialTruncateWAL_AppendV2(t *testing.T) {
const (
numDatapoints = 1000
numSeries = 800
)
opts := DefaultOptions()
reg := prometheus.NewRegistry()
s := createTestAgentDB(t, reg, opts)
defer func() {
require.NoError(t, s.Close())
}()
app := s.AppenderV2(context.TODO())
// Create the first batch of 800 series, each with 1000 data points, at a fixed lastTs of 500.
var lastTs int64 = 500
lbls := labelsForTest(t.Name()+"batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Create the second batch of 800 series, each with 1000 data points, at a fixed lastTs of 600.
lastTs = 600
lbls = labelsForTest(t.Name()+"batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Truncate the WAL with a mint that GCs only the first batch of 800 series while retaining the second batch of 800 series.
s.truncate(lastTs - 1)
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
require.Len(t, m.Metric, 1)
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
}
func TestWALReplay_AppendV2(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
lastTs = 500
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
// Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
// We need the original directory so we can recreate the storage for replay.
storageDir := filepath.Dir(s.wal.Dir())
reg := prometheus.NewRegistry()
replayStorage, err := Open(s.logger, reg, nil, storageDir, s.opts)
if err != nil {
t.Fatalf("unable to create storage for the agent: %v", err)
}
defer func() {
require.NoError(t, replayStorage.Close())
}()
// Check if all the series are retrieved back from the WAL.
m := gatherFamily(t, reg, "prometheus_agent_active_series")
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
// Check if lastTs of the samples retrieved from the WAL is retained.
metrics := replayStorage.series.series
for i := range metrics {
mp := metrics[i]
for _, v := range mp {
require.Equal(t, v.lastTs, int64(lastTs))
}
}
}
func Test_ExistingWAL_NextRef_AppendV2(t *testing.T) {
dbDir := t.TempDir()
rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
defer func() {
require.NoError(t, rs.Close())
}()
db, err := Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
seriesCount := 10
// Append <seriesCount> series
app := db.AppenderV2(context.Background())
for i := range seriesCount {
lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("series_%d", i))
_, err := app.Append(0, lset, 0, 0, 100, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
histogramCount := 10
histograms := tsdbutil.GenerateTestHistograms(histogramCount)
// Append <histogramCount> histogram series
for i := range histogramCount {
lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("histogram_%d", i))
_, err := app.Append(0, lset, 0, 0, 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Truncate the WAL to force creation of a new segment.
require.NoError(t, db.truncate(0))
require.NoError(t, db.Close())
// Create a new storage and see what nextRef is initialized to.
db, err = Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
}()
require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
}
func TestStorage_DuplicateExemplarsIgnored_AppendV2(t *testing.T) {
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.AppenderV2(context.Background())
defer s.Close()
// Write a few exemplars to our appender and call Commit().
// If the Labels, Value, or Timestamp differ from the last exemplar,
// a new one should be appended; otherwise, it should be skipped.
e1 := exemplar.Exemplar{Labels: labels.FromStrings("a", "1"), Value: 20, Ts: 10, HasTs: true}
e2 := exemplar.Exemplar{Labels: labels.FromStrings("b", "2"), Value: 20, Ts: 10, HasTs: true}
e3 := exemplar.Exemplar{Labels: labels.FromStrings("b", "2"), Value: 42, Ts: 10, HasTs: true}
e4 := exemplar.Exemplar{Labels: labels.FromStrings("b", "2"), Value: 42, Ts: 25, HasTs: true}
_, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0, 0, nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{e1, e1, e2, e2, e2, e3, e3, e4, e4},
})
require.NoError(t, err, "should not reject valid series")
require.NoError(t, app.Commit())
// Read back what was written to the WAL.
var walExemplarsCount int
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer sr.Close()
r := wlog.NewReader(sr)
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
for r.Next() {
rec := r.Record()
if dec.Type(rec) == record.Exemplars {
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
}
}
// We passed 9 exemplars in a single Append call, but only 4 of those should have gotten through.
require.Equal(t, 4, walExemplarsCount)
}
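// Sketch (hypothetical, not part of the original file): the sequential
// deduplication applied to the batch above. Of the nine exemplars passed in
// one Append call, only the first occurrence of each distinct
// (labels, value, ts) run survives: e1, e2, e3, e4.
func dedupSequentialExemplars(es []exemplar.Exemplar) []exemplar.Exemplar {
	var out []exemplar.Exemplar
	for _, e := range es {
		if n := len(out); n > 0 &&
			labels.Equal(out[n-1].Labels, e.Labels) &&
			out[n-1].Value == e.Value &&
			out[n-1].Ts == e.Ts {
			continue
		}
		out = append(out, e)
	}
	return out
}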
func TestDBAllowOOOSamples_AppendV2(t *testing.T) {
const (
numDatapoints = 5
numHistograms = 5
numSeries = 4
offset = 100
)
reg := prometheus.NewRegistry()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = math.MaxInt64
s := createTestAgentDB(t, reg, opts)
app := s.AppenderV2(context.TODO())
// Let's add some samples in the [offset, offset+numDatapoints) range.
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.Append(0, lset, 0, int64(i), float64(i), nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{{
Labels: lset,
Ts: int64(i) * 2,
Value: float64(i),
HasTs: true,
}},
})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i-offset], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i-offset], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i-offset], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := offset; i < numDatapoints+offset; i++ {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i-offset], storage.AOptions{})
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
require.Equal(t, float64(20), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
require.NoError(t, s.Close())
// Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
// We need the original directory so we can recreate the storage for replay.
storageDir := filepath.Dir(s.wal.Dir())
// Replay the storage so that the lastTs for each series is recorded.
reg2 := prometheus.NewRegistry()
db, err := Open(s.logger, reg2, nil, storageDir, s.opts)
if err != nil {
t.Fatalf("unable to create storage for the agent: %v", err)
}
app = db.AppenderV2(context.Background())
// Now the lastTs will have been recorded successfully.
// Let's try appending twice as many OOO samples in the [0, numDatapoints) range.
lbls = labelsForTest(t.Name(), numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, int64(i), float64(i), nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{{
Labels: lset,
Ts: int64(i) * 2,
Value: float64(i),
HasTs: true,
}},
})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries*2)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numDatapoints {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
m = gatherFamily(t, reg2, "prometheus_agent_samples_appended_total")
require.Equal(t, float64(40), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
require.Equal(t, float64(160), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
require.NoError(t, db.Close())
}
func TestDBOutOfOrderTimeWindow_AppendV2(t *testing.T) {
tc := []struct {
outOfOrderTimeWindow, firstTs, secondTs int64
expectedError error
}{
{0, 100, 101, nil},
{0, 100, 100, storage.ErrOutOfOrderSample},
{0, 100, 99, storage.ErrOutOfOrderSample},
{100, 100, 1, nil},
{100, 100, 0, storage.ErrOutOfOrderSample},
}
for _, c := range tc {
t.Run(fmt.Sprintf("outOfOrderTimeWindow=%d, firstTs=%d, secondTs=%d, expectedError=%v", c.outOfOrderTimeWindow, c.firstTs, c.secondTs, c.expectedError), func(t *testing.T) {
reg := prometheus.NewRegistry()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = c.outOfOrderTimeWindow
s := createTestAgentDB(t, reg, opts)
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name()+"_histogram", 1)
lset := labels.New(lbls[0]...)
_, err := app.Append(0, lset, 0, c.firstTs, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
require.NoError(t, err)
err = app.Commit()
require.NoError(t, err)
_, err = app.Append(0, lset, 0, c.secondTs, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
require.ErrorIs(t, err, c.expectedError)
lbls = labelsForTest(t.Name(), 1)
lset = labels.New(lbls[0]...)
_, err = app.Append(0, lset, 0, c.firstTs, 0, nil, nil, storage.AOptions{})
require.NoError(t, err)
err = app.Commit()
require.NoError(t, err)
_, err = app.Append(0, lset, 0, c.secondTs, 0, nil, nil, storage.AOptions{})
require.ErrorIs(t, err, c.expectedError)
expectedAppendedSamples := float64(2)
if c.expectedError != nil {
expectedAppendedSamples = 1
}
m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/agent/db.go | tsdb/agent/db.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"path/filepath"
"sync"
"time"
"unicode/utf8"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/zeropool"
)
const (
sampleMetricTypeFloat = "float"
sampleMetricTypeHistogram = "histogram"
)
var ErrUnsupported = errors.New("unsupported operation with WAL-only storage")
// Default values for options.
var (
DefaultTruncateFrequency = 2 * time.Hour
DefaultMinWALTime = int64(5 * time.Minute / time.Millisecond)
DefaultMaxWALTime = int64(4 * time.Hour / time.Millisecond)
)
// Options of the WAL storage.
type Options struct {
// WALSegmentSize is the max size of segments (WAL files).
// If WALSegmentSize <= 0, the default segment size is used.
// If WALSegmentSize > 0, segments are capped at WALSegmentSize bytes.
WALSegmentSize int
// WALCompression configures the compression type to use on records in the WAL.
WALCompression compression.Type
// StripeSize is the size (power of 2) in entries of the series hash map. Reducing the size will save memory but impact performance.
StripeSize int
// TruncateFrequency determines how frequently to truncate data from the WAL.
TruncateFrequency time.Duration
// Shortest and longest amount of time data can exist in the WAL before being
// deleted.
MinWALTime, MaxWALTime int64
// NoLockfile disables creation and consideration of a lock file.
NoLockfile bool
// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
OutOfOrderTimeWindow int64
// EnableSTAsZeroSample represents the 'created-timestamp-zero-ingestion' feature flag.
// If true, a non-empty ST that is earlier than the sample timestamp will be
// stored as a zero sample before the actual sample.
//
// The zero sample is best-effort; only a debug log is emitted on failure.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableSTAsZeroSample bool
}
// DefaultOptions used for the WAL storage. They are reasonable for setups using
// millisecond-precision timestamps.
func DefaultOptions() *Options {
return &Options{
WALSegmentSize: wlog.DefaultSegmentSize,
WALCompression: compression.None,
StripeSize: tsdb.DefaultStripeSize,
TruncateFrequency: DefaultTruncateFrequency,
MinWALTime: DefaultMinWALTime,
MaxWALTime: DefaultMaxWALTime,
NoLockfile: false,
OutOfOrderTimeWindow: 0,
}
}
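// Editorial sketch: a caller typically starts from DefaultOptions and
// overrides individual fields before handing the result to Open, e.g. to
// tolerate five minutes of out-of-order samples (hypothetical values):
//
//	opts := DefaultOptions()
//	opts.OutOfOrderTimeWindow = int64(5 * time.Minute / time.Millisecond)
//	db, err := Open(logger, reg, remoteStorage, dir, opts)
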
type dbMetrics struct {
r prometheus.Registerer
numActiveSeries prometheus.Gauge
numWALSeriesPendingDeletion prometheus.Gauge
totalAppendedSamples *prometheus.CounterVec
totalAppendedExemplars prometheus.Counter
totalOutOfOrderSamples prometheus.Counter
walTruncateDuration prometheus.Summary
walCorruptionsTotal prometheus.Counter
walTotalReplayDuration prometheus.Gauge
checkpointDeleteFail prometheus.Counter
checkpointDeleteTotal prometheus.Counter
checkpointCreationFail prometheus.Counter
checkpointCreationTotal prometheus.Counter
}
func newDBMetrics(r prometheus.Registerer) *dbMetrics {
m := dbMetrics{r: r}
m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_agent_active_series",
Help: "Number of active series being tracked by the WAL storage",
})
m.numWALSeriesPendingDeletion = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_agent_deleted_series",
Help: "Number of series pending deletion from the WAL",
})
m.totalAppendedSamples = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_agent_samples_appended_total",
Help: "Total number of samples appended to the storage",
}, []string{"type"})
m.totalAppendedExemplars = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_exemplars_appended_total",
Help: "Total number of exemplars appended to the storage",
})
m.totalOutOfOrderSamples = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_out_of_order_samples_total",
Help: "Total number of out of order samples ingestion failed attempts.",
})
m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "prometheus_agent_truncate_duration_seconds",
Help: "Duration of WAL truncation.",
})
m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_corruptions_total",
Help: "Total number of WAL corruptions.",
})
m.walTotalReplayDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_agent_data_replay_duration_seconds",
Help: "Time taken to replay the data on disk.",
})
m.checkpointDeleteFail = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_checkpoint_deletions_failed_total",
Help: "Total number of checkpoint deletions that failed.",
})
m.checkpointDeleteTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_checkpoint_deletions_total",
Help: "Total number of checkpoint deletions attempted.",
})
m.checkpointCreationFail = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_checkpoint_creations_failed_total",
Help: "Total number of checkpoint creations that failed.",
})
m.checkpointCreationTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_checkpoint_creations_total",
Help: "Total number of checkpoint creations attempted.",
})
if r != nil {
r.MustRegister(
m.numActiveSeries,
m.numWALSeriesPendingDeletion,
m.totalAppendedSamples,
m.totalAppendedExemplars,
m.totalOutOfOrderSamples,
m.walTruncateDuration,
m.walCorruptionsTotal,
m.walTotalReplayDuration,
m.checkpointDeleteFail,
m.checkpointDeleteTotal,
m.checkpointCreationFail,
m.checkpointCreationTotal,
)
}
return &m
}
func (m *dbMetrics) Unregister() {
if m.r == nil {
return
}
cs := []prometheus.Collector{
m.numActiveSeries,
m.numWALSeriesPendingDeletion,
m.totalAppendedSamples,
m.totalAppendedExemplars,
m.totalOutOfOrderSamples,
m.walTruncateDuration,
m.walCorruptionsTotal,
m.walTotalReplayDuration,
m.checkpointDeleteFail,
m.checkpointDeleteTotal,
m.checkpointCreationFail,
m.checkpointCreationTotal,
}
for _, c := range cs {
m.r.Unregister(c)
}
}
// DB represents a WAL-only storage. It implements storage.DB.
type DB struct {
mtx sync.RWMutex
logger *slog.Logger
opts *Options
rs *remote.Storage
wal *wlog.WL
locker *tsdbutil.DirLocker
appenderPool sync.Pool
appenderV2Pool sync.Pool
bufPool sync.Pool
// These pools are only used during WAL replay and are reset at the end.
// NOTE: Adjust resetWALReplayResources() upon changes to the pools.
walReplaySeriesPool zeropool.Pool[[]record.RefSeries]
walReplaySamplesPool zeropool.Pool[[]record.RefSample]
walReplayHistogramsPool zeropool.Pool[[]record.RefHistogramSample]
walReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
nextRef *atomic.Uint64
series *stripeSeries
// deleted is a map from series ref IDs that should be deleted from the WAL
// to the last WAL segment through which their records must be kept.
deleted map[chunks.HeadSeriesRef]int
donec chan struct{}
stopc chan struct{}
writeNotified wlog.WriteNotified
metrics *dbMetrics
}
// Open returns a new agent.DB in the given directory.
func Open(l *slog.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) {
opts = validateOptions(opts)
locker, err := tsdbutil.NewDirLocker(dir, "agent", l, reg)
if err != nil {
return nil, err
}
if !opts.NoLockfile {
if err := locker.Lock(); err != nil {
return nil, err
}
}
// remote_write expects WAL to be stored in a "wal" subdirectory of the main storage.
dir = filepath.Join(dir, "wal")
w, err := wlog.NewSize(l, reg, dir, opts.WALSegmentSize, opts.WALCompression)
if err != nil {
return nil, fmt.Errorf("creating WAL: %w", err)
}
db := &DB{
logger: l,
opts: opts,
rs: rs,
wal: w,
locker: locker,
nextRef: atomic.NewUint64(0),
series: newStripeSeries(opts.StripeSize),
deleted: make(map[chunks.HeadSeriesRef]int),
donec: make(chan struct{}),
stopc: make(chan struct{}),
metrics: newDBMetrics(reg),
}
db.bufPool.New = func() any {
return make([]byte, 0, 1024)
}
db.appenderPool.New = func() any {
return &appender{
appenderBase: appenderBase{
DB: db,
pendingSeries: make([]record.RefSeries, 0, 100),
pendingSamples: make([]record.RefSample, 0, 100),
pendingHistograms: make([]record.RefHistogramSample, 0, 100),
pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
pendingExamplars: make([]record.RefExemplar, 0, 10),
},
}
}
db.appenderV2Pool.New = func() any {
return &appenderV2{
appenderBase: appenderBase{
DB: db,
pendingSeries: make([]record.RefSeries, 0, 100),
pendingSamples: make([]record.RefSample, 0, 100),
pendingHistograms: make([]record.RefHistogramSample, 0, 100),
pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
pendingExamplars: make([]record.RefExemplar, 0, 10),
},
}
}
if err := db.replayWAL(); err != nil {
db.logger.Warn("encountered WAL read error, attempting repair", "err", err)
if err := w.Repair(err); err != nil {
return nil, fmt.Errorf("repair corrupted WAL: %w", err)
}
db.logger.Info("successfully repaired WAL")
}
go db.run()
return db, nil
}
// SetWriteNotified sets an instance to be notified when a write happens.
// It must be used during initialization. It is not safe to use it during execution.
func (db *DB) SetWriteNotified(wn wlog.WriteNotified) {
db.writeNotified = wn
}
func validateOptions(opts *Options) *Options {
if opts == nil {
opts = DefaultOptions()
}
if opts.WALSegmentSize <= 0 {
opts.WALSegmentSize = wlog.DefaultSegmentSize
}
if opts.WALCompression == "" {
opts.WALCompression = compression.None
}
// Revert StripeSize to DefaultStripeSize if StripeSize is non-positive or not a power of 2.
if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) {
opts.StripeSize = tsdb.DefaultStripeSize
}
if opts.TruncateFrequency <= 0 {
opts.TruncateFrequency = DefaultTruncateFrequency
}
if opts.MinWALTime <= 0 {
opts.MinWALTime = DefaultMinWALTime
}
if opts.MaxWALTime <= 0 {
opts.MaxWALTime = DefaultMaxWALTime
}
if opts.MinWALTime > opts.MaxWALTime {
opts.MaxWALTime = opts.MinWALTime
}
if t := int64(opts.TruncateFrequency / time.Millisecond); opts.MaxWALTime < t {
opts.MaxWALTime = t
}
return opts
}
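// isPowerOfTwo is an editorial sketch, not referenced by the production code:
// it spells out the bit trick validateOptions uses for StripeSize above. For
// n > 0, n&(n-1) clears the lowest set bit, so the result is zero exactly
// when n has a single set bit, i.e. when n is a power of two.
func isPowerOfTwo(n int) bool {
return n > 0 && n&(n-1) == 0
}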
func (db *DB) replayWAL() error {
db.logger.Info("replaying WAL, this may take a while", "dir", db.wal.Dir())
defer db.resetWALReplayResources()
start := time.Now()
dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir())
if err != nil && !errors.Is(err, record.ErrNotFound) {
return fmt.Errorf("find last checkpoint: %w", err)
}
multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{}
if err == nil {
sr, err := wlog.NewSegmentsReader(dir)
if err != nil {
return fmt.Errorf("open checkpoint: %w", err)
}
defer func() {
if err := sr.Close(); err != nil {
db.logger.Warn("error while closing the wal segments reader", "err", err)
}
}()
// A corrupted checkpoint is a hard error for now and requires user
// intervention. There's likely little data that can be recovered anyway.
if err := db.loadWAL(wlog.NewReader(sr), multiRef); err != nil {
return fmt.Errorf("backfill checkpoint: %w", err)
}
startFrom++
db.logger.Info("WAL checkpoint loaded")
}
// Find the last segment.
_, last, err := wlog.Segments(db.wal.Dir())
if err != nil {
return fmt.Errorf("finding WAL segments: %w", err)
}
// Backfill segments from the most recent checkpoint onwards.
for i := startFrom; i <= last; i++ {
seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i))
if err != nil {
return fmt.Errorf("open WAL segment: %d: %w", i, err)
}
sr := wlog.NewSegmentBufReader(seg)
err = db.loadWAL(wlog.NewReader(sr), multiRef)
if err := sr.Close(); err != nil {
db.logger.Warn("error while closing the wal segments reader", "err", err)
}
if err != nil {
return err
}
db.logger.Info("WAL segment loaded", "segment", i, "maxSegment", last)
}
walReplayDuration := time.Since(start)
db.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds())
return nil
}
func (db *DB) resetWALReplayResources() {
db.walReplaySeriesPool = zeropool.Pool[[]record.RefSeries]{}
db.walReplaySamplesPool = zeropool.Pool[[]record.RefSample]{}
db.walReplayHistogramsPool = zeropool.Pool[[]record.RefHistogramSample]{}
db.walReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{}
}
func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
var (
syms = labels.NewSymbolTable() // One table for the whole WAL.
dec = record.NewDecoder(syms, db.logger)
lastRef = chunks.HeadSeriesRef(db.nextRef.Load())
decoded = make(chan any, 10)
errCh = make(chan error, 1)
)
go func() {
defer close(decoded)
var err error
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
series := db.walReplaySeriesPool.Get()[:0]
series, err = dec.Series(rec, series)
if err != nil {
errCh <- &wlog.CorruptionErr{
Err: fmt.Errorf("decode series: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- series
case record.Samples:
samples := db.walReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(rec, samples)
if err != nil {
errCh <- &wlog.CorruptionErr{
Err: fmt.Errorf("decode samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- samples
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
histograms := db.walReplayHistogramsPool.Get()[:0]
histograms, err = dec.HistogramSamples(rec, histograms)
if err != nil {
errCh <- &wlog.CorruptionErr{
Err: fmt.Errorf("decode histogram samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- histograms
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
floatHistograms := db.walReplayFloatHistogramsPool.Get()[:0]
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
if err != nil {
errCh <- &wlog.CorruptionErr{
Err: fmt.Errorf("decode float histogram samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- floatHistograms
case record.Tombstones, record.Exemplars:
// We don't care about tombstones or exemplars during replay.
// TODO: If we decide to decode exemplars, we should make sure to prepopulate
// stripeSeries.exemplars in the next block by using setLatestExemplar.
continue
default:
errCh <- &wlog.CorruptionErr{
Err: fmt.Errorf("invalid record type %v", dec.Type(rec)),
Segment: r.Segment(),
Offset: r.Offset(),
}
}
}
}()
var nonExistentSeriesRefs atomic.Uint64
for d := range decoded {
switch v := d.(type) {
case []record.RefSeries:
for _, entry := range v {
// If this is a new series, create it in memory. If we never read in a
// sample for this series, its timestamp will remain at 0 and it will
// be deleted at the next GC.
if db.series.GetByID(entry.Ref) == nil {
series := &memSeries{ref: entry.Ref, lset: entry.Labels, lastTs: 0}
db.series.Set(entry.Labels.Hash(), series)
multiRef[entry.Ref] = series.ref
db.metrics.numActiveSeries.Inc()
if entry.Ref > lastRef {
lastRef = entry.Ref
}
}
}
db.walReplaySeriesPool.Put(v)
case []record.RefSample:
for _, entry := range v {
// Update the lastTs for the series based on the sample's timestamp.
ref, ok := multiRef[entry.Ref]
if !ok {
nonExistentSeriesRefs.Inc()
continue
}
series := db.series.GetByID(ref)
if entry.T > series.lastTs {
series.lastTs = entry.T
}
}
db.walReplaySamplesPool.Put(v)
case []record.RefHistogramSample:
for _, entry := range v {
// Update the lastTs for the series based on the sample's timestamp.
ref, ok := multiRef[entry.Ref]
if !ok {
nonExistentSeriesRefs.Inc()
continue
}
series := db.series.GetByID(ref)
if entry.T > series.lastTs {
series.lastTs = entry.T
}
}
db.walReplayHistogramsPool.Put(v)
case []record.RefFloatHistogramSample:
for _, entry := range v {
// Update the lastTs for the series based on the sample's timestamp.
ref, ok := multiRef[entry.Ref]
if !ok {
nonExistentSeriesRefs.Inc()
continue
}
series := db.series.GetByID(ref)
if entry.T > series.lastTs {
series.lastTs = entry.T
}
}
db.walReplayFloatHistogramsPool.Put(v)
default:
panic(fmt.Errorf("unexpected decoded type: %T", d))
}
}
if v := nonExistentSeriesRefs.Load(); v > 0 {
db.logger.Warn("found sample referencing non-existing series", "skipped_series", v)
}
db.nextRef.Store(uint64(lastRef))
select {
case err := <-errCh:
return err
default:
if r.Err() != nil {
return fmt.Errorf("read records: %w", r.Err())
}
return nil
}
}
func (db *DB) run() {
defer close(db.donec)
Loop:
for {
select {
case <-db.stopc:
break Loop
case <-time.After(db.opts.TruncateFrequency):
// The timestamp ts is used to determine which series are not receiving
// samples and may be deleted from the WAL. Their most recent append
// timestamp is compared to ts, and if that timestamp is older than ts,
// they are considered inactive and may be deleted.
//
// Subtracting a duration from ts will add a buffer for when series are
// considered inactive and safe for deletion.
ts := max(db.rs.LowestSentTimestamp()-db.opts.MinWALTime, 0)
// Network issues can prevent the result of getRemoteWriteTimestamp from
// changing. We don't want data in the WAL to grow forever, so we set a cap
// on the maximum age data can be. If our ts is older than this cutoff point,
// we'll shift it forward to start deleting very stale data.
if maxTS := timestamp.FromTime(time.Now()) - db.opts.MaxWALTime; ts < maxTS {
ts = maxTS
}
db.logger.Debug("truncating the WAL", "ts", ts)
if err := db.truncate(ts); err != nil {
db.logger.Warn("failed to truncate WAL", "err", err)
}
}
}
}
// keepSeriesInWALCheckpointFn returns a function that is used to determine whether a series record should be kept in the checkpoint.
// last is the last WAL segment that was considered for checkpointing.
// NOTE: the agent implementation here is different from the Prometheus implementation, in that it uses WAL segment numbers instead of timestamps.
func (db *DB) keepSeriesInWALCheckpointFn(last int) func(id chunks.HeadSeriesRef) bool {
return func(id chunks.HeadSeriesRef) bool {
// Keep the record if the series exists in the db.
if db.series.GetByID(id) != nil {
return true
}
// Keep the record if the series was recently deleted.
seg, ok := db.deleted[id]
return ok && seg > last
}
}
func (db *DB) truncate(mint int64) error {
db.logger.Info("series GC started")
db.mtx.RLock()
defer db.mtx.RUnlock()
start := time.Now()
db.gc(mint)
db.logger.Info("series GC completed", "duration", time.Since(start))
first, last, err := wlog.Segments(db.wal.Dir())
if err != nil {
return fmt.Errorf("get segment range: %w", err)
}
// Start a new segment so low ingestion volume instances don't have more WAL
// than needed.
if _, err := db.wal.NextSegment(); err != nil {
return fmt.Errorf("next segment: %w", err)
}
last-- // Never consider most recent segment for checkpoint
if last < 0 {
return nil // no segments yet
}
// The lower two-thirds of segments should contain mostly obsolete samples.
// If we have fewer than two segments, it's not worth checkpointing yet.
last = first + (last-first)*2/3
if last <= first {
return nil
}
db.metrics.checkpointCreationTotal.Inc()
if _, err = wlog.Checkpoint(db.logger, db.wal, first, last, db.keepSeriesInWALCheckpointFn(last), mint); err != nil {
db.metrics.checkpointCreationFail.Inc()
var cerr *wlog.CorruptionErr
if errors.As(err, &cerr) {
db.metrics.walCorruptionsTotal.Inc()
}
return fmt.Errorf("create checkpoint: %w", err)
}
if err := db.wal.Truncate(last + 1); err != nil {
// If truncating fails, we'll just try it again at the next checkpoint.
// Leftover segments will still just be ignored in the future if there's a
// checkpoint that supersedes them.
db.logger.Error("truncating segments failed", "err", err)
}
// The checkpoint is written and segments before it are truncated, so we
// no longer need to track deleted series that were being kept around.
for ref, segment := range db.deleted {
if segment <= last {
delete(db.deleted, ref)
}
}
db.metrics.checkpointDeleteTotal.Inc()
db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted)))
if err := wlog.DeleteCheckpoints(db.wal.Dir(), last); err != nil {
// Leftover old checkpoints do not cause problems down the line beyond
// occupying disk space. They will just be ignored since a newer checkpoint
// exists.
db.logger.Error("delete old checkpoints", "err", err)
db.metrics.checkpointDeleteFail.Inc()
}
db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds())
db.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start))
return nil
}
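// Editorial worked example for the checkpoint arithmetic in truncate above:
// with first=10 and last=19, the newest segment is excluded (last becomes 18)
// and then last = 10 + (18-10)*2/3 = 15, so segments 10..15 are checkpointed
// while 16..18 remain live in the WAL.
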
// gc marks ref IDs that have not received a sample since mint as deleted in
// db.deleted, along with the segment where they originally got deleted.
func (db *DB) gc(mint int64) {
deleted := db.series.GC(mint)
db.metrics.numActiveSeries.Sub(float64(len(deleted)))
_, last, _ := wlog.Segments(db.wal.Dir())
// We want to keep series records for any newly deleted series
// until we've passed the last recorded segment. This prevents the WAL from
// containing samples that reference series records which no longer exist.
for ref := range deleted {
db.deleted[ref] = last
}
db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted)))
}
// StartTime implements the Storage interface.
func (*DB) StartTime() (int64, error) {
return int64(model.Latest), nil
}
// Querier implements the Storage interface.
func (*DB) Querier(int64, int64) (storage.Querier, error) {
return nil, ErrUnsupported
}
// ChunkQuerier implements the Storage interface.
func (*DB) ChunkQuerier(int64, int64) (storage.ChunkQuerier, error) {
return nil, ErrUnsupported
}
// ExemplarQuerier implements the Storage interface.
func (*DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
return nil, ErrUnsupported
}
// Appender implements storage.Storage.
func (db *DB) Appender(context.Context) storage.Appender {
return db.appenderPool.Get().(storage.Appender)
}
// Close implements the Storage interface.
func (db *DB) Close() error {
db.mtx.Lock()
defer db.mtx.Unlock()
close(db.stopc)
<-db.donec
db.metrics.Unregister()
return tsdb_errors.NewMulti(db.locker.Release(), db.wal.Close()).Err()
}
type appenderBase struct {
*DB
pendingSeries []record.RefSeries
pendingSamples []record.RefSample
pendingHistograms []record.RefHistogramSample
pendingFloatHistograms []record.RefFloatHistogramSample
pendingExamplars []record.RefExemplar
// Pointers to the series referenced by each element of pendingSamples.
// Series lock is not held on elements.
sampleSeries []*memSeries
// Pointers to the series referenced by each element of pendingHistograms.
// Series lock is not held on elements.
histogramSeries []*memSeries
// Pointers to the series referenced by each element of pendingFloatHistograms.
// Series lock is not held on elements.
floatHistogramSeries []*memSeries
}
type appender struct {
appenderBase
hints *storage.AppendOptions
}
func (a *appender) SetOptions(opts *storage.AppendOptions) {
a.hints = opts
}
func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
// series references and chunk references are identical for agent mode.
headRef := chunks.HeadSeriesRef(ref)
series := a.series.GetByID(headRef)
if series == nil {
var err error
series, err = a.getOrCreate(l)
if err != nil {
return 0, err
}
}
series.Lock()
defer series.Unlock()
if t <= a.minValidTime(series.lastTs) {
a.metrics.totalOutOfOrderSamples.Inc()
return 0, storage.ErrOutOfOrderSample
}
// NOTE: always modify pendingSamples and sampleSeries together.
a.pendingSamples = append(a.pendingSamples, record.RefSample{
Ref: series.ref,
T: t,
V: v,
})
a.sampleSeries = append(a.sampleSeries, series)
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
return storage.SeriesRef(series.ref), nil
}
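// Editorial note (a sketch, assuming the usual definition of minValidTime
// elsewhere in this package): with an out-of-order window w, minValidTime is
// roughly series.lastTs-w, so the gate above accepts t > lastTs-w. A window
// of 0 therefore degenerates to strictly increasing timestamps per series,
// which is exactly what TestDBOutOfOrderTimeWindow exercises.
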
func (a *appenderBase) getOrCreate(l labels.Labels) (series *memSeries, err error) {
// Ensure no empty or duplicate labels have gotten through. This mirrors the
// equivalent validation code in the TSDB's headAppender.
l = l.WithoutEmpty()
if l.IsEmpty() {
return nil, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample)
}
if lbl, dup := l.HasDuplicateLabelNames(); dup {
return nil, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample)
}
hash := l.Hash()
series = a.series.GetByHash(hash, l)
if series != nil {
return series, nil
}
ref := chunks.HeadSeriesRef(a.nextRef.Inc())
series = &memSeries{ref: ref, lset: l, lastTs: math.MinInt64}
a.series.Set(hash, series)
a.pendingSeries = append(a.pendingSeries, record.RefSeries{
Ref: series.ref,
Labels: l,
})
a.metrics.numActiveSeries.Inc()
return series, nil
}
func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
// Series references and chunk references are identical for agent mode.
headRef := chunks.HeadSeriesRef(ref)
s := a.series.GetByID(headRef)
if s == nil {
return 0, fmt.Errorf("unknown series ref when trying to add exemplar: %d", ref)
}
// Ensure no empty labels have gotten through.
e.Labels = e.Labels.WithoutEmpty()
if err := a.validateExemplar(s.ref, e); err != nil {
if errors.Is(err, storage.ErrDuplicateExemplar) {
// Duplicate, don't return an error but don't accept the exemplar.
return 0, nil
}
return 0, err
}
a.series.SetLatestExemplar(s.ref, &e)
a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{
Ref: s.ref,
T: e.Ts,
V: e.Value,
Labels: e.Labels,
})
a.metrics.totalAppendedExemplars.Inc()
return storage.SeriesRef(s.ref), nil
}
func (a *appenderBase) validateExemplar(ref chunks.HeadSeriesRef, e exemplar.Exemplar) error {
if lbl, dup := e.Labels.HasDuplicateLabelNames(); dup {
return fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidExemplar)
}
// Exemplar label length does not include chars involved in text rendering, such as
// quotes, equals signs, or commas. See the definition of const ExemplarMaxLabelSetLength.
labelSetLen := 0
if err := e.Labels.Validate(func(l labels.Label) error {
labelSetLen += utf8.RuneCountInString(l.Name)
labelSetLen += utf8.RuneCountInString(l.Value)
if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
return storage.ErrExemplarLabelLength
}
return nil
}); err != nil {
return err
}
// Check for duplicate vs last stored exemplar for this series, and discard those.
// Otherwise, record the current exemplar as the latest.
// Prometheus' TSDB returns 0 when encountering duplicates, so we do the same here.
prevExemplar := a.series.GetLatestExemplar(ref)
if prevExemplar != nil && prevExemplar.Equals(e) {
return storage.ErrDuplicateExemplar
}
return nil
}
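// exemplarLabelSetLen is an editorial sketch, unused by the production path:
// it mirrors the length rule validateExemplar enforces above, where only the
// runes of label names and values count toward the limit, never the quotes,
// equals signs, or commas of the rendered form.
func exemplarLabelSetLen(ls labels.Labels) int {
n := 0
ls.Range(func(l labels.Label) {
n += utf8.RuneCountInString(l.Name) + utf8.RuneCountInString(l.Value)
})
return n
}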
func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
if err := h.Validate(); err != nil {
return 0, err
}
}
if fh != nil {
if err := fh.Validate(); err != nil {
return 0, err
}
}
// series references and chunk references are identical for agent mode.
headRef := chunks.HeadSeriesRef(ref)
series := a.series.GetByID(headRef)
if series == nil {
var err error
series, err = a.getOrCreate(l)
if err != nil {
return 0, err
}
}
series.Lock()
defer series.Unlock()
if t <= a.minValidTime(series.lastTs) {
a.metrics.totalOutOfOrderSamples.Inc()
return 0, storage.ErrOutOfOrderSample
}
switch {
case h != nil:
// NOTE: always modify pendingHistograms and histogramSeries together
a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
Ref: series.ref,
T: t,
H: h,
})
a.histogramSeries = append(a.histogramSeries, series)
case fh != nil:
// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
Ref: series.ref,
T: t,
FH: fh,
})
a.floatHistogramSeries = append(a.floatHistogramSeries, series)
}
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
return storage.SeriesRef(series.ref), nil
}
func (*appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Wire metadata in the Agent's appender.
return 0, nil
}
func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
if err := h.Validate(); err != nil {
return 0, err
}
}
if fh != nil {
if err := fh.Validate(); err != nil {
return 0, err
}
}
if st >= t {
return 0, storage.ErrSTNewerThanSample
}
series := a.series.GetByID(chunks.HeadSeriesRef(ref))
if series == nil {
var err error
series, err = a.getOrCreate(l)
if err != nil {
return 0, err
}
}
series.Lock()
defer series.Unlock()
if st <= a.minValidTime(series.lastTs) {
return 0, storage.ErrOutOfOrderST
}
if st <= series.lastTs {
// Discard the sample if it's out of order.
return 0, storage.ErrOutOfOrderST
}
// NOTE(bwplotka): This is a bug, as we "commit" pending sample TS as the WAL last TS. It was likely done
// to satisfy incorrect TestDBStartTimestampSamplesIngestion test. We are leaving it as-is given the planned removal
// of AppenderV1 as per https://github.com/prometheus/prometheus/issues/17632.
series.lastTs = st
switch {
case h != nil:
zeroHistogram := &histogram.Histogram{}
a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
Ref: series.ref,
T: st,
H: zeroHistogram,
})
a.histogramSeries = append(a.histogramSeries, series)
case fh != nil:
a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
Ref: series.ref,
T: st,
FH: &histogram.FloatHistogram{},
})
a.floatHistogramSeries = append(a.floatHistogramSeries, series)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/tsdb/agent/series_test.go | tsdb/agent/series_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"math"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
)
func TestNoDeadlock(t *testing.T) {
const numWorkers = 1000
var (
wg sync.WaitGroup
started = make(chan struct{})
stripeSeries = newStripeSeries(3)
)
wg.Add(numWorkers)
for range numWorkers {
go func() {
defer wg.Done()
<-started
_ = stripeSeries.GC(math.MaxInt64)
}()
}
wg.Add(numWorkers)
for i := range numWorkers {
go func(i int) {
defer wg.Done()
<-started
series := &memSeries{
ref: chunks.HeadSeriesRef(i),
lset: labels.FromMap(map[string]string{
"id": strconv.Itoa(i),
}),
}
stripeSeries.Set(series.lset.Hash(), series)
}(i)
}
finished := make(chan struct{})
go func() {
wg.Wait()
close(finished)
}()
close(started)
select {
case <-finished:
return
case <-time.After(15 * time.Second):
require.FailNow(t, "deadlock detected")
}
}
func labelsWithHashCollision() (labels.Labels, labels.Labels) {
// These two series have the same XXHash, thanks to https://github.com/pstibrany/labels_hash_collisions.
ls1 := labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl")
ls2 := labels.FromStrings("__name__", "metric", "lbl", "RqcXatm")
if ls1.Hash() != ls2.Hash() {
// These ones are the same when using -tags slicelabels
ls1 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y")
ls2 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF")
}
if ls1.Hash() != ls2.Hash() {
panic("This code needs to be updated: find new labels with colliding hash values.")
}
return ls1, ls2
}
// stripeSeriesWithCollidingSeries returns a stripeSeries with two memSeries having the same, colliding, hash.
func stripeSeriesWithCollidingSeries(*testing.T) (*stripeSeries, *memSeries, *memSeries) {
lbls1, lbls2 := labelsWithHashCollision()
ms1 := memSeries{
lset: lbls1,
}
ms2 := memSeries{
lset: lbls2,
}
hash := lbls1.Hash()
s := newStripeSeries(1)
s.Set(hash, &ms1)
s.Set(hash, &ms2)
return s, &ms1, &ms2
}
func TestStripeSeries_Get(t *testing.T) {
s, ms1, ms2 := stripeSeriesWithCollidingSeries(t)
hash := ms1.lset.Hash()
// Verify that we can get both of the series despite the hash collision
got := s.GetByHash(hash, ms1.lset)
require.Same(t, ms1, got)
got = s.GetByHash(hash, ms2.lset)
require.Same(t, ms2, got)
}
func TestStripeSeries_gc(t *testing.T) {
s, ms1, ms2 := stripeSeriesWithCollidingSeries(t)
hash := ms1.lset.Hash()
s.GC(1)
// Verify that we can get neither ms1 nor ms2 after GC'ing the corresponding series.
got := s.GetByHash(hash, ms1.lset)
require.Nil(t, got)
got = s.GetByHash(hash, ms2.lset)
require.Nil(t, got)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/internal/tools/tools.go | internal/tools/tools.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build tools
// Package tools tracks dependencies for tools that are required to generate the protobuf code.
// See https://github.com/golang/go/issues/25922
package tools
import (
_ "github.com/bufbuild/buf/cmd/buf"
_ "github.com/daixiang0/gci/cmd/gci"
_ "github.com/gogo/protobuf/protoc-gen-gogofast"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2"
)
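// Editorial note: with this pattern, running `go install` for one of the
// blank-imported packages from within this module builds the version pinned
// in go.mod, e.g. (hypothetical invocation):
//
//	go install github.com/bufbuild/buf/cmd/buf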
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/template/template_amd64_test.go | template/template_amd64_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package template
import (
"math"
"testing"
)
// Some test cases rely upon architecture-specific behaviors with respect
// to numerical conversions. The logic remains the same across architectures,
// but outputs can vary, so the cases are only run on amd64.
// See https://github.com/prometheus/prometheus/issues/10185 for more details.
func TestTemplateExpansionAMD64(t *testing.T) {
testTemplateExpansion(t, []scenario{
{
// HumanizeDuration - MaxInt64.
text: "{{ humanizeDuration . }}",
input: math.MaxInt64,
output: "-106751991167300d -15h -30m -8s",
},
{
// HumanizeDuration - MaxUint64.
text: "{{ humanizeDuration . }}",
input: uint(math.MaxUint64),
output: "-106751991167300d -15h -30m -8s",
},
})
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/template/template_test.go | template/template_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package template
import (
"context"
"math"
"net/url"
"reflect"
"testing"
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
)
func TestTemplateExpansion(t *testing.T) {
testTemplateExpansion(t, []scenario{
{
// No template.
text: "plain text",
output: "plain text",
},
{
// Simple value.
text: "{{ 1 }}",
output: "1",
},
{
// Native histogram value.
text: "{{ . | value }}",
input: &sample{Value: &histogram.FloatHistogram{Count: 3, Sum: 10}},
output: (&histogram.FloatHistogram{Count: 3, Sum: 10}).String(),
},
{
// Non-ASCII space (not allowed in text/template, see https://github.com/golang/go/blob/master/src/text/template/parse/lex.go#L98)
text: "{{ }}",
shouldFail: true,
errorMsg: "error parsing template test: template: test:1: unrecognized character in action: U+00A0",
},
{
// HTML escaping.
text: "{{ \"<b>\" }}",
output: "<b>",
html: true,
},
{
// Disabling HTML escaping.
text: "{{ \"<b>\" | safeHtml }}",
output: "<b>",
html: true,
},
{
// HTML escaping doesn't apply to non-html.
text: "{{ \"<b>\" }}",
output: "<b>",
},
{
// Pass multiple arguments to templates.
text: "{{define \"x\"}}{{.arg0}} {{.arg1}}{{end}}{{template \"x\" (args 1 \"2\")}}",
output: "1 2",
},
{
text: "{{ query \"1.5\" | first | value }}",
output: "1.5",
queryResult: promql.Vector{{T: 0, F: 1.5}},
},
{
// Get value from query.
text: "{{ query \"metric{instance='a'}\" | first | value }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
F: 11,
},
},
output: "11",
},
{
// Get value of a native histogram from query.
text: "{{ query \"metric{instance='a'}\" | first | value }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
H: &histogram.FloatHistogram{Count: 3, Sum: 10},
},
},
output: (&histogram.FloatHistogram{Count: 3, Sum: 10}).String(),
},
{
// Get label from query.
text: "{{ query \"metric{instance='a'}\" | first | label \"instance\" }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
F: 11,
},
},
output: "a",
},
{
// Get label "__value__" from query.
text: "{{ query \"metric{__value__='a'}\" | first | strvalue }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"),
T: 0,
F: 11,
},
},
output: "a",
},
{
// Missing label is empty when using label function.
text: "{{ query \"metric{instance='a'}\" | first | label \"foo\" }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
F: 11,
},
},
output: "",
},
{
// Missing label is empty when not using label function.
text: "{{ $x := query \"metric\" | first }}{{ $x.Labels.foo }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
F: 11,
},
},
output: "",
},
{
text: "{{ $x := query \"metric\" | first }}{{ $x.Labels.foo }}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
F: 11,
},
},
output: "",
html: true,
},
{
// Range over query and sort by label.
text: "{{ range query \"metric\" | sortByLabel \"instance\" }}{{.Labels.instance}}:{{.Value}}: {{end}}",
queryResult: promql.Vector{
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "b"),
T: 0,
F: 21,
}, {
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
T: 0,
F: 11,
},
},
output: "a:11: b:21: ",
},
{
// Simple hostname.
text: "{{ \"foo.example.com\" | stripPort }}",
output: "foo.example.com",
},
{
// Hostname with port.
text: "{{ \"foo.example.com:12345\" | stripPort }}",
output: "foo.example.com",
},
{
// Simple IPv4 address.
text: "{{ \"192.0.2.1\" | stripPort }}",
output: "192.0.2.1",
},
{
// IPv4 address with port.
text: "{{ \"192.0.2.1:12345\" | stripPort }}",
output: "192.0.2.1",
},
{
// Simple IPv6 address.
text: "{{ \"2001:0DB8::1\" | stripPort }}",
output: "2001:0DB8::1",
},
{
// IPv6 address with port.
text: "{{ \"[2001:0DB8::1]:12345\" | stripPort }}",
output: "2001:0DB8::1",
},
{
// Value can't be split into host and port.
text: "{{ \"[2001:0DB8::1]::12345\" | stripPort }}",
output: "[2001:0DB8::1]::12345",
},
{
// Missing value renders as "<no value>" with nil options.
text: "{{ .Foo }}",
output: "<no value>",
},
{
// Missing value renders as "<no value>" with no options.
text: "{{ .Foo }}",
options: make([]string, 0),
output: "<no value>",
},
{
// Assert that missing value returns error with missingkey=error.
text: "{{ .Foo }}",
options: []string{"missingkey=error"},
shouldFail: true,
errorMsg: `error executing template test: template: test:1:3: executing "test" at <.Foo>: nil data; no entry for key "Foo"`,
},
{
// Missing value is "" for nil options in ExpandHTML.
text: "{{ .Foo }}",
output: "",
html: true,
},
{
// Missing value is "" for no options in ExpandHTML.
text: "{{ .Foo }}",
options: make([]string, 0),
output: "",
html: true,
},
{
// Assert that missing value returns error with missingkey=error in ExpandHTML.
text: "{{ .Foo }}",
options: []string{"missingkey=error"},
shouldFail: true,
errorMsg: `error executing template test: template: test:1:3: executing "test" at <.Foo>: nil data; no entry for key "Foo"`,
html: true,
},
{
// Unparsable template.
text: "{{",
shouldFail: true,
errorMsg: "error parsing template test: template: test:1: unclosed action",
},
{
// Error in function.
text: "{{ query \"missing\" | first }}",
queryResult: promql.Vector{},
shouldFail: true,
errorMsg: "error executing template test: template: test:1:21: executing \"test\" at <first>: error calling first: first() called on vector with no elements",
},
{
// Panic.
text: "{{ (query \"missing\").banana }}",
queryResult: promql.Vector{},
shouldFail: true,
errorMsg: "error executing template test: template: test:1:10: executing \"test\" at <\"missing\">: can't evaluate field banana in type template.queryResult",
},
{
// Regex replacement.
text: "{{ reReplaceAll \"(a)b\" \"x$1\" \"ab\" }}",
output: "xa",
},
{
// Humanize - float64.
text: "{{ range . }}{{ humanize . }}:{{ end }}",
input: []float64{0.0, 1.0, 1234567.0, .12},
output: "0:1:1.235M:120m:",
},
{
// Humanize - string.
text: "{{ range . }}{{ humanize . }}:{{ end }}",
input: []string{"0.0", "1.0", "1234567.0", ".12"},
output: "0:1:1.235M:120m:",
},
{
// Humanize - string with error.
text: `{{ humanize "one" }}`,
shouldFail: true,
errorMsg: `error executing template test: template: test:1:3: executing "test" at <humanize "one">: error calling humanize: strconv.ParseFloat: parsing "one": invalid syntax`,
},
{
// Humanize - int.
text: "{{ range . }}{{ humanize . }}:{{ end }}",
input: []int64{0, -1, 1, 1234567, math.MaxInt64},
output: "0:-1:1:1.235M:9.223E:",
},
{
// Humanize - uint.
text: "{{ range . }}{{ humanize . }}:{{ end }}",
input: []uint64{0, 1, 1234567, math.MaxUint64},
output: "0:1:1.235M:18.45E:",
},
{
// Humanize1024 - float64.
text: "{{ range . }}{{ humanize1024 . }}:{{ end }}",
input: []float64{0.0, 1.0, 1048576.0, .12},
output: "0:1:1Mi:0.12:",
},
{
// Humanize1024 - string.
text: "{{ range . }}{{ humanize1024 . }}:{{ end }}",
input: []string{"0.0", "1.0", "1048576.0", ".12"},
output: "0:1:1Mi:0.12:",
},
{
// Humanize1024 - string with error.
text: `{{ humanize1024 "one" }}`,
shouldFail: true,
errorMsg: `error executing template test: template: test:1:3: executing "test" at <humanize1024 "one">: error calling humanize1024: strconv.ParseFloat: parsing "one": invalid syntax`,
},
{
// Humanize1024 - int.
text: "{{ range . }}{{ humanize1024 . }}:{{ end }}",
input: []int64{0, -1, 1, 1234567, math.MaxInt64},
output: "0:-1:1:1.177Mi:8Ei:",
},
{
// Humanize1024 - uint.
text: "{{ range . }}{{ humanize1024 . }}:{{ end }}",
input: []uint64{0, 1, 1234567, math.MaxUint64},
output: "0:1:1.177Mi:16Ei:",
},
{
// HumanizeDuration - seconds - float64.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []float64{0, 1, 60, 3600, 86400, 86400 + 3600, -(86400*2 + 3600*3 + 60*4 + 5), 899.99},
output: "0s:1s:1m 0s:1h 0m 0s:1d 0h 0m 0s:1d 1h 0m 0s:-2d 3h 4m 5s:14m 59s:",
},
{
// HumanizeDuration - seconds - string.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []string{"0", "1", "60", "3600", "86400"},
output: "0s:1s:1m 0s:1h 0m 0s:1d 0h 0m 0s:",
},
{
// HumanizeDuration - subsecond and fractional seconds - float64.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []float64{.1, .0001, .12345, 60.1, 60.5, 1.2345, 12.345},
output: "100ms:100us:123.5ms:1m 0s:1m 0s:1.234s:12.35s:",
},
{
// HumanizeDuration - subsecond and fractional seconds - string.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []string{".1", ".0001", ".12345", "60.1", "60.5", "1.2345", "12.345"},
output: "100ms:100us:123.5ms:1m 0s:1m 0s:1.234s:12.35s:",
},
{
// HumanizeDuration - string with error.
text: `{{ humanizeDuration "one" }}`,
shouldFail: true,
errorMsg: `error executing template test: template: test:1:3: executing "test" at <humanizeDuration "one">: error calling humanizeDuration: strconv.ParseFloat: parsing "one": invalid syntax`,
},
{
// HumanizeDuration - int.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []int{0, -1, 1, 1234567},
output: "0s:-1s:1s:14d 6h 56m 7s:",
},
{
// HumanizeDuration - uint.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []uint{0, 1, 1234567},
output: "0s:1s:14d 6h 56m 7s:",
},
{
// Humanize* Inf and NaN - float64.
text: "{{ range . }}{{ humanize . }}:{{ humanize1024 . }}:{{ humanizeDuration . }}:{{humanizeTimestamp .}}:{{ end }}",
input: []float64{math.Inf(1), math.Inf(-1), math.NaN()},
output: "+Inf:+Inf:+Inf:+Inf:-Inf:-Inf:-Inf:-Inf:NaN:NaN:NaN:NaN:",
},
{
// Humanize* Inf and NaN - string.
text: "{{ range . }}{{ humanize . }}:{{ humanize1024 . }}:{{ humanizeDuration . }}:{{humanizeTimestamp .}}:{{ end }}",
input: []string{"+Inf", "-Inf", "NaN"},
output: "+Inf:+Inf:+Inf:+Inf:-Inf:-Inf:-Inf:-Inf:NaN:NaN:NaN:NaN:",
},
{
// HumanizePercentage - model.SampleValue input - float64.
text: "{{ -0.22222 | humanizePercentage }}:{{ 0.0 | humanizePercentage }}:{{ 0.1234567 | humanizePercentage }}:{{ 1.23456 | humanizePercentage }}",
output: "-22.22%:0%:12.35%:123.5%",
},
{
// HumanizePercentage - int.
text: "{{ range . }}{{ humanizePercentage . }}:{{ end }}",
input: []int64{0, -1, 1, 1234567, math.MaxInt64},
output: "0%:-100%:100%:1.235e+08%:9.223e+20%:",
},
{
// HumanizePercentage - uint.
text: "{{ range . }}{{ humanizePercentage . }}:{{ end }}",
input: []uint64{0, 1, 1234567, math.MaxUint64},
output: "0%:100%:1.235e+08%:1.845e+21%:",
},
{
// HumanizePercentage - model.SampleValue input - string.
text: `{{ "-0.22222" | humanizePercentage }}:{{ "0.0" | humanizePercentage }}:{{ "0.1234567" | humanizePercentage }}:{{ "1.23456" | humanizePercentage }}`,
output: "-22.22%:0%:12.35%:123.5%",
},
{
// HumanizePercentage - model.SampleValue input - string with error.
text: `{{ "one" | humanizePercentage }}`,
shouldFail: true,
errorMsg: `error executing template test: template: test:1:11: executing "test" at <humanizePercentage>: error calling humanizePercentage: strconv.ParseFloat: parsing "one": invalid syntax`,
},
{
// HumanizeTimestamp - int.
text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
input: []int64{0, -1, 1, 1234567, 9223372036},
output: "1970-01-01 00:00:00 +0000 UTC:1969-12-31 23:59:59 +0000 UTC:1970-01-01 00:00:01 +0000 UTC:1970-01-15 06:56:07 +0000 UTC:2262-04-11 23:47:16 +0000 UTC:",
},
{
// HumanizeTimestamp - uint.
text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
input: []uint64{0, 1, 1234567, 9223372036},
output: "1970-01-01 00:00:00 +0000 UTC:1970-01-01 00:00:01 +0000 UTC:1970-01-15 06:56:07 +0000 UTC:2262-04-11 23:47:16 +0000 UTC:",
},
{
// HumanizeTimestamp - int with error.
text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
input: []int64{math.MinInt64, math.MaxInt64},
shouldFail: true,
errorMsg: `error executing template test: template: test:1:16: executing "test" at <humanizeTimestamp .>: error calling humanizeTimestamp: -9.223372036854776e+18 cannot be represented as a nanoseconds timestamp since it overflows int64`,
},
{
// HumanizeTimestamp - uint with error.
text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
input: []uint64{math.MaxUint64},
shouldFail: true,
errorMsg: `error executing template test: template: test:1:16: executing "test" at <humanizeTimestamp .>: error calling humanizeTimestamp: 1.8446744073709552e+19 cannot be represented as a nanoseconds timestamp since it overflows int64`,
},
{
// HumanizeTimestamp - model.SampleValue input - float64.
text: "{{ 1435065584.128 | humanizeTimestamp }}",
output: "2015-06-23 13:19:44.128 +0000 UTC",
},
{
// HumanizeTimestamp - model.SampleValue input - string.
text: `{{ "1435065584.128" | humanizeTimestamp }}`,
output: "2015-06-23 13:19:44.128 +0000 UTC",
},
{
// ToTime - model.SampleValue input - float64.
text: `{{ (1435065584.128 | toTime).Format "2006" }}`,
output: "2015",
},
{
// ToTime - model.SampleValue input - string.
text: `{{ ("1435065584.128" | toTime).Format "2006" }}`,
output: "2015",
},
{
// toDuration - input as float64 seconds, returns *time.Duration.
text: `{{ (1800 | toDuration).String }}`,
output: "30m0s",
},
{
// toDuration - input as string seconds, returns *time.Duration.
text: `{{ ("1800" | toDuration).String }}`,
output: "30m0s",
},
{
// now - returns fixed timestamp as float64 seconds.
text: `{{ now }}`,
output: "1.353755652e+09",
},
{
// now - returns fixed timestamp converted to formatted time string.
text: `{{ (now | toTime).Format "Mon Jan 2 15:04:05 2006" }}`,
output: "Sat Nov 24 11:14:12 2012",
},
{
// Returns the Unix milliseconds timestamp for 30 minutes ago.
text: `{{ ("-30m" | parseDuration | toDuration | (now | toTime).Add).UnixMilli }}`,
output: "1353753852000",
},
{
// Title.
text: "{{ \"aa bb CC\" | title }}",
output: "Aa Bb CC",
},
{
// toUpper.
text: "{{ \"aa bb CC\" | toUpper }}",
output: "AA BB CC",
},
{
// toLower.
text: "{{ \"aA bB CC\" | toLower }}",
output: "aa bb cc",
},
{
// Match.
text: "{{ match \"a+\" \"aa\" }} {{ match \"a+\" \"b\" }}",
output: "true false",
},
{
// graphLink.
text: "{{ graphLink \"up\" }}",
output: "/graph?g0.expr=up&g0.tab=0",
},
{
// tableLink.
text: "{{ tableLink \"up\" }}",
output: "/graph?g0.expr=up&g0.tab=1",
},
{
// tmpl.
text: "{{ define \"a\" }}x{{ end }}{{ $name := \"a\"}}{{ tmpl $name . }}",
output: "x",
html: true,
},
{
// pathPrefix.
text: "{{ pathPrefix }}",
output: "/path/prefix",
},
{
// externalURL.
text: "{{ externalURL }}",
output: "http://testhost:9090/path/prefix",
},
{
// parseDuration with positive duration (using printf to ensure the return is a string).
text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}",
output: "3720.01",
},
{
// parseDuration with negative duration (using printf to ensure the return is a string).
text: "{{ printf \"%0.2f\" (parseDuration \"-1h2m10ms\") }}",
output: "-3720.01",
},
{
// Simple hostname.
text: "{{ \"foo.example.com\" | stripDomain }}",
output: "foo",
},
{
// Hostname with port.
text: "{{ \"foo.example.com:12345\" | stripDomain }}",
output: "foo:12345",
},
{
// Simple IPv4 address.
text: "{{ \"192.0.2.1\" | stripDomain }}",
output: "192.0.2.1",
},
{
// IPv4 address with port.
text: "{{ \"192.0.2.1:12345\" | stripDomain }}",
output: "192.0.2.1:12345",
},
{
// Simple IPv6 address.
text: "{{ \"2001:0DB8::1\" | stripDomain }}",
output: "2001:0DB8::1",
},
{
// IPv6 address with port.
text: "{{ \"[2001:0DB8::1]:12345\" | stripDomain }}",
output: "[2001:0DB8::1]:12345",
},
{
// Value can't be split into host and port.
text: "{{ \"[2001:0DB8::1]::12345\" | stripDomain }}",
output: "[2001:0DB8::1]::12345",
},
})
}
type scenario struct {
text string
output string
input any
options []string
queryResult promql.Vector
shouldFail bool
html bool
errorMsg string
}
func testTemplateExpansion(t *testing.T, scenarios []scenario) {
extURL, err := url.Parse("http://testhost:9090/path/prefix")
if err != nil {
panic(err)
}
for _, s := range scenarios {
queryFunc := func(context.Context, string, time.Time) (promql.Vector, error) {
return s.queryResult, nil
}
var result string
var err error
expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, model.Time(1353755652000), queryFunc, extURL, s.options)
if s.html {
result, err = expander.ExpandHTML(nil)
} else {
result, err = expander.Expand()
}
if s.shouldFail {
require.Error(t, err, "%v", s.text)
require.EqualError(t, err, s.errorMsg)
continue
}
		require.NoError(t, err)
		require.Equal(t, s.output, result)
}
}
func Test_floatToTime(t *testing.T) {
type args struct {
v float64
}
tests := []struct {
name string
args args
want *time.Time
wantErr bool
}{
{
"happy path",
args{
v: 1657155181,
},
func() *time.Time {
tm := time.Date(2022, 7, 7, 0, 53, 1, 0, time.UTC)
return &tm
}(),
false,
},
{
"more than math.MaxInt64",
args{
v: 1.79769313486231570814527423731704356798070e+300,
},
nil,
true,
},
{
"less than math.MinInt64",
args{
v: -1.79769313486231570814527423731704356798070e+300,
},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := floatToTime(tt.args.v)
if (err != nil) != tt.wantErr {
t.Errorf("floatToTime() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("floatToTime() got = %v, want %v", got, tt.want)
}
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/template/template.go | template/template.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package template
import (
"bytes"
"context"
"errors"
"fmt"
html_template "html/template"
"maps"
"math"
"net"
"net/url"
"sort"
"strings"
text_template "text/template"
"time"
"github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus"
common_templates "github.com/prometheus/common/helpers/templates"
"github.com/prometheus/common/model"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/strutil"
)
var (
templateTextExpansionFailures = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_template_text_expansion_failures_total",
Help: "The total number of template text expansion failures.",
})
templateTextExpansionTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_template_text_expansions_total",
Help: "The total number of template text expansions.",
})
errNaNOrInf = errors.New("value is NaN or Inf")
)
func init() {
prometheus.MustRegister(templateTextExpansionFailures)
prometheus.MustRegister(templateTextExpansionTotal)
}
// A version of vector that's easier to use from templates.
type sample struct {
Labels map[string]string
Value any
}
type queryResult []*sample
type queryResultByLabelSorter struct {
results queryResult
by string
}
func (q queryResultByLabelSorter) Len() int {
return len(q.results)
}
func (q queryResultByLabelSorter) Less(i, j int) bool {
return q.results[i].Labels[q.by] < q.results[j].Labels[q.by]
}
func (q queryResultByLabelSorter) Swap(i, j int) {
q.results[i], q.results[j] = q.results[j], q.results[i]
}
// QueryFunc executes a PromQL query at the given time.
type QueryFunc func(context.Context, string, time.Time) (promql.Vector, error)
func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (queryResult, error) {
vector, err := queryFn(ctx, q, ts)
if err != nil {
return nil, err
}
// promql.Vector is hard to work with in templates, so convert to
// base data types.
// TODO(fabxc): probably not true anymore after type rework.
result := make(queryResult, len(vector))
for n, v := range vector {
s := sample{
Value: v.F,
Labels: v.Metric.Map(),
}
if v.H != nil {
s.Value = v.H
}
result[n] = &s
}
return result, nil
}
// Expander executes templates in text or HTML mode with a common set of Prometheus template functions.
type Expander struct {
text string
name string
data any
funcMap text_template.FuncMap
options []string
}
// NewTemplateExpander returns a template expander ready to use.
func NewTemplateExpander(
ctx context.Context,
text string,
name string,
data any,
timestamp model.Time,
queryFunc QueryFunc,
externalURL *url.URL,
options []string,
) *Expander {
if options == nil {
options = []string{"missingkey=zero"}
}
return &Expander{
text: text,
name: name,
data: data,
funcMap: text_template.FuncMap{
"query": func(q string) (queryResult, error) {
return query(ctx, q, timestamp.Time(), queryFunc)
},
"first": func(v queryResult) (*sample, error) {
if len(v) > 0 {
return v[0], nil
}
return nil, errors.New("first() called on vector with no elements")
},
"label": func(label string, s *sample) string {
return s.Labels[label]
},
"value": func(s *sample) any {
return s.Value
},
"strvalue": func(s *sample) string {
return s.Labels["__value__"]
},
"args": func(args ...any) map[string]any {
result := make(map[string]any)
for i, a := range args {
result[fmt.Sprintf("arg%d", i)] = a
}
return result
},
"reReplaceAll": func(pattern, repl, text string) string {
re := regexp.MustCompile(pattern)
return re.ReplaceAllString(text, repl)
},
"safeHtml": func(text string) html_template.HTML {
return html_template.HTML(text)
},
"match": regexp.MatchString,
"title": cases.Title(language.AmericanEnglish, cases.NoLower).String,
"toUpper": strings.ToUpper,
"toLower": strings.ToLower,
"graphLink": strutil.GraphLinkForExpression,
"tableLink": strutil.TableLinkForExpression,
"sortByLabel": func(label string, v queryResult) queryResult {
sorter := queryResultByLabelSorter{v[:], label}
sort.Stable(sorter)
return v
},
"stripPort": func(hostPort string) string {
host, _, err := net.SplitHostPort(hostPort)
if err != nil {
return hostPort
}
return host
},
"stripDomain": func(hostPort string) string {
host, port, err := net.SplitHostPort(hostPort)
if err != nil {
host = hostPort
}
ip := net.ParseIP(host)
if ip != nil {
return hostPort
}
host = strings.Split(host, ".")[0]
if port != "" {
return net.JoinHostPort(host, port)
}
return host
},
"humanize": func(i any) (string, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
if v == 0 || math.IsNaN(v) || math.IsInf(v, 0) {
return fmt.Sprintf("%.4g", v), nil
}
if math.Abs(v) >= 1 {
prefix := ""
for _, p := range []string{"k", "M", "G", "T", "P", "E", "Z", "Y"} {
if math.Abs(v) < 1000 {
break
}
prefix = p
v /= 1000
}
return fmt.Sprintf("%.4g%s", v, prefix), nil
}
prefix := ""
for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
if math.Abs(v) >= 1 {
break
}
prefix = p
v *= 1000
}
return fmt.Sprintf("%.4g%s", v, prefix), nil
},
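			// Illustrative note (not in the original source): with the loops above,
			// an input of 1234000 becomes "1.234M" and 0.0042 becomes "4.2m",
			// i.e. %.4g formatting plus an SI prefix.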
"humanize1024": func(i any) (string, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
if math.Abs(v) <= 1 || math.IsNaN(v) || math.IsInf(v, 0) {
return fmt.Sprintf("%.4g", v), nil
}
prefix := ""
for _, p := range []string{"ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"} {
if math.Abs(v) < 1024 {
break
}
prefix = p
v /= 1024
}
return fmt.Sprintf("%.4g%s", v, prefix), nil
},
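			// Illustrative note (not in the original source): an input of 1048576
			// walks ki -> Mi above and yields "1Mi"; values with |v| <= 1 are
			// printed as-is.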
"humanizeDuration": common_templates.HumanizeDuration,
"humanizePercentage": func(i any) (string, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
return fmt.Sprintf("%.4g%%", v*100), nil
},
"humanizeTimestamp": common_templates.HumanizeTimestamp,
"toTime": func(i any) (*time.Time, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return nil, err
}
return floatToTime(v)
},
"toDuration": func(i any) (*time.Duration, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return nil, err
}
d := time.Duration(v * float64(time.Second))
return &d, nil
},
"now": func() float64 {
return float64(timestamp) / 1000.0
},
"pathPrefix": func() string {
return externalURL.Path
},
"externalURL": func() string {
return externalURL.String()
},
"parseDuration": func(d string) (float64, error) {
v, err := model.ParseDurationAllowNegative(d)
if err != nil {
return 0, err
}
return float64(time.Duration(v)) / float64(time.Second), nil
},
"urlQueryEscape": url.QueryEscape,
},
options: options,
}
}
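// Example (illustrative sketch, not part of the original file; the query
// function, URL, and template text below are hypothetical):
//
//	extURL, _ := url.Parse("http://example:9090")
//	noopQuery := func(context.Context, string, time.Time) (promql.Vector, error) {
//		return nil, nil
//	}
//	te := NewTemplateExpander(context.Background(), `{{ 0.1234 | humanizePercentage }}`,
//		"example", nil, model.Time(0), noopQuery, extURL, nil)
//	out, _ := te.Expand() // out == "12.34%"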
// AlertTemplateData returns the interface to be used in expanding the template.
func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, smpl promql.Sample) any {
res := struct {
Labels map[string]string
ExternalLabels map[string]string
ExternalURL string
Value any
}{
Labels: labels,
ExternalLabels: externalLabels,
ExternalURL: externalURL,
Value: smpl.F,
}
if smpl.H != nil {
res.Value = smpl.H
}
return res
}
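// Illustrative note (not in the original source): templates executed with this
// data address the fields as {{ .Labels.instance }}, {{ .ExternalURL }}, and
// {{ .Value }}.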
// Funcs adds the functions in fm to the Expander's function map.
// Existing functions will be overwritten in case of conflict.
func (te Expander) Funcs(fm text_template.FuncMap) {
maps.Copy(te.funcMap, fm)
}
// Expand expands a template in text (non-HTML) mode.
func (te Expander) Expand() (result string, resultErr error) {
	// It'd be better to have no alert description than to kill the whole process
	// if there's a bug in the template.
defer func() {
if r := recover(); r != nil {
var ok bool
resultErr, ok = r.(error)
if !ok {
resultErr = fmt.Errorf("panic expanding template %v: %v", te.name, r)
}
}
if resultErr != nil {
templateTextExpansionFailures.Inc()
}
}()
templateTextExpansionTotal.Inc()
tmpl := text_template.New(te.name).Funcs(te.funcMap)
tmpl.Option(te.options...)
tmpl, err := tmpl.Parse(te.text)
if err != nil {
return "", fmt.Errorf("error parsing template %v: %w", te.name, err)
}
var buffer bytes.Buffer
err = tmpl.Execute(&buffer, te.data)
if err != nil {
return "", fmt.Errorf("error executing template %v: %w", te.name, err)
}
return buffer.String(), nil
}
// ExpandHTML expands a template with HTML escaping, with templates read from the given files.
func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr error) {
defer func() {
if r := recover(); r != nil {
var ok bool
resultErr, ok = r.(error)
if !ok {
resultErr = fmt.Errorf("panic expanding template %s: %v", te.name, r)
}
}
}()
//nolint:unconvert // Before Go 1.19 conversion from text_template to html_template is mandatory
tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
tmpl.Option(te.options...)
tmpl.Funcs(html_template.FuncMap{
"tmpl": func(name string, data any) (html_template.HTML, error) {
var buffer bytes.Buffer
err := tmpl.ExecuteTemplate(&buffer, name, data)
return html_template.HTML(buffer.String()), err
},
})
tmpl, err := tmpl.Parse(te.text)
if err != nil {
return "", fmt.Errorf("error parsing template %v: %w", te.name, err)
}
if len(templateFiles) > 0 {
_, err = tmpl.ParseFiles(templateFiles...)
if err != nil {
return "", fmt.Errorf("error parsing template files for %v: %w", te.name, err)
}
}
var buffer bytes.Buffer
err = tmpl.Execute(&buffer, te.data)
if err != nil {
return "", fmt.Errorf("error executing template %v: %w", te.name, err)
}
return buffer.String(), nil
}
// ParseTest parses the templates and returns the error if any.
func (te Expander) ParseTest() error {
	_, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
	return err
}
func floatToTime(v float64) (*time.Time, error) {
if math.IsNaN(v) || math.IsInf(v, 0) {
return nil, errNaNOrInf
}
timestamp := v * 1e9
if timestamp > math.MaxInt64 || timestamp < math.MinInt64 {
return nil, fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v)
}
t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC()
return &t, nil
}
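// Illustrative note (not in the original source): floatToTime(1435065584.128)
// scales to nanoseconds and yields 2015-06-23 13:19:44.128 +0000 UTC, while a
// magnitude beyond math.MaxInt64 nanoseconds returns the overflow error above.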
// templateFunctions returns a representative funcMap with all available template functions.
// This is used to discover which functions are available for feature registration.
func templateFunctions() text_template.FuncMap {
// Create a dummy expander to get the function map.
expander := NewTemplateExpander(
context.Background(),
"",
"",
nil,
0,
nil,
&url.URL{},
nil,
)
return expander.funcMap
}
// RegisterFeatures registers all template functions with the feature registry.
func RegisterFeatures(r features.Collector) {
// Get all function names from the template function map.
funcMap := templateFunctions()
for name := range funcMap {
r.Enable(features.TemplatingFunctions, name)
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/config/config_windows_test.go | config/config_windows_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml"
var ruleFilesExpectedConf = &Config{
loaded: true,
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
RuleFiles: []string{
"testdata\\first.rules",
"testdata\\rules\\second.rules",
"c:\\absolute\\third.rules",
},
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/config/config_default_test.go | config/config_default_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
package config
const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"
var ruleFilesExpectedConf = &Config{
loaded: true,
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
OTLPConfig: DefaultOTLPConfig,
RuleFiles: []string{
"testdata/first.rules",
"testdata/rules/second.rules",
"/absolute/third.rules",
},
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/config/config.go | config/config.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"log/slog"
"mime"
"net/url"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
"time"
"github.com/alecthomas/units"
"github.com/grafana/regexp"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/otlptranslator"
"github.com/prometheus/sigv4"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote/azuread"
"github.com/prometheus/prometheus/storage/remote/googleiam"
)
var (
patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`)
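	// Illustrative note (not in the original source): the pattern above allows
	// at most one '*' with no '/' after it, so "testdata/my/*.rules" is a valid
	// rule file path while "my/*/x.rules" is rejected by UnmarshalYAML below.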
reservedHeaders = map[string]struct{}{
// NOTE: authorization is checked specially,
// see RemoteWriteConfig.UnmarshalYAML.
// "authorization": {},
"host": {},
"content-encoding": {},
"content-length": {},
"content-type": {},
"user-agent": {},
"connection": {},
"keep-alive": {},
"proxy-authenticate": {},
"proxy-authorization": {},
"www-authenticate": {},
"accept-encoding": {},
"x-prometheus-remote-write-version": {},
"x-prometheus-remote-read-version": {},
// Added by SigV4.
"x-amz-date": {},
"x-amz-security-token": {},
"x-amz-content-sha256": {},
}
)
// Load parses the YAML input s into a Config.
func Load(s string, logger *slog.Logger) (*Config, error) {
cfg := &Config{}
	// If the entire config body is empty, the UnmarshalYAML method is
	// never called. We thus have to set the DefaultConfig at the entry
	// point as well.
*cfg = DefaultConfig
err := yaml.UnmarshalStrict([]byte(s), cfg)
if err != nil {
return nil, err
}
b := labels.NewScratchBuilder(0)
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
newV := os.Expand(v.Value, func(s string) string {
if s == "$" {
return "$"
}
if v := os.Getenv(s); v != "" {
return v
}
logger.Warn("Empty environment variable", "name", s)
return ""
})
if newV != v.Value {
logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV)
}
// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
b.Add(v.Name, newV)
})
if !b.Labels().IsEmpty() {
cfg.GlobalConfig.ExternalLabels = b.Labels()
}
switch cfg.OTLPConfig.TranslationStrategy {
case otlptranslator.UnderscoreEscapingWithSuffixes, otlptranslator.UnderscoreEscapingWithoutSuffixes:
case "":
case otlptranslator.NoTranslation, otlptranslator.NoUTF8EscapingWithSuffixes:
if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation {
return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy)
}
default:
return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
}
cfg.loaded = true
return cfg, nil
}
// LoadFile parses and validates the given YAML file into a read-only Config.
// Callers should never write to or shallow copy the returned Config.
func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) {
content, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
cfg, err := Load(string(content), logger)
if err != nil {
return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err)
}
if agentMode {
if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
return nil, errors.New("field alerting is not allowed in agent mode")
}
if len(cfg.RuleFiles) > 0 {
return nil, errors.New("field rule_files is not allowed in agent mode")
}
if len(cfg.RemoteReadConfigs) > 0 {
return nil, errors.New("field remote_read is not allowed in agent mode")
}
}
cfg.SetDirectory(filepath.Dir(filename))
return cfg, nil
}
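// Example (illustrative sketch, not part of the original file): loading a
// minimal config from a string; promslog is github.com/prometheus/common/promslog.
//
//	cfg, err := Load("global:\n  scrape_interval: 30s\n", promslog.NewNopLogger())
//	if err != nil {
//		// handle the parse/validation error
//	}
//	_ = cfg.GlobalConfig.ScrapeInterval // 30s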
func boolPtr(b bool) *bool {
return &b
}
// The defaults applied before parsing the respective config sections.
var (
// DefaultConfig is the default top-level configuration.
DefaultConfig = Config{
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
OTLPConfig: DefaultOTLPConfig,
}
// DefaultGlobalConfig is the default global configuration.
DefaultGlobalConfig = GlobalConfig{
ScrapeInterval: model.Duration(1 * time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(1 * time.Minute),
RuleQueryOffset: model.Duration(0 * time.Minute),
// This is nil to be able to distinguish between the case when
// the normal default should be used and the case when a
// new default is needed due to an enabled feature flag.
// E.g. set to `DefaultProtoFirstScrapeProtocols` when
// the feature flag `created-timestamp-zero-ingestion` is set.
ScrapeProtocols: nil,
// When the native histogram feature flag is enabled,
// ScrapeNativeHistograms default changes to true.
ScrapeNativeHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
ExtraScrapeMetrics: boolPtr(false),
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
DefaultRuntimeConfig = RuntimeConfig{
// Go runtime tuning.
GoGC: getGoGC(),
}
// DefaultScrapeConfig is the default scrape configuration. Users of this
// default MUST call Validate() on the config after creation, even if it's
// used unaltered, to check for parameter correctness and fill out default
// values that can't be set inline in this declaration.
DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout, ScrapeInterval, ScrapeProtocols, AlwaysScrapeClassicHistograms, and ConvertClassicHistogramsToNHCB default to the configured globals.
MetricsPath: "/metrics",
Scheme: "http",
HonorLabels: false,
HonorTimestamps: true,
HTTPClientConfig: config.DefaultHTTPClientConfig,
EnableCompression: true,
}
// DefaultAlertmanagerConfig is the default alertmanager configuration.
DefaultAlertmanagerConfig = AlertmanagerConfig{
Scheme: "http",
Timeout: model.Duration(10 * time.Second),
APIVersion: AlertmanagerAPIVersionV2,
HTTPClientConfig: config.DefaultHTTPClientConfig,
}
DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: false,
}
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second),
ProtobufMessage: remoteapi.WriteV1MessageType,
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
}
// DefaultQueueConfig is the default remote queue configuration.
DefaultQueueConfig = QueueConfig{
// With a maximum of 50 shards, assuming an average of 100ms remote write
// time and 2000 samples per batch, we will be able to push 1M samples/s.
MaxShards: 50,
MinShards: 1,
MaxSamplesPerSend: 2000,
// Each shard will have a max of 10,000 samples pending in its channel, plus the pending
// samples that have been enqueued. Theoretically we should only ever have about 12,000 samples
// per shard pending. At 50 shards that's 600k.
Capacity: 10000,
BatchSendDeadline: model.Duration(5 * time.Second),
// Backoff times for retrying a batch of samples on recoverable errors.
MinBackoff: model.Duration(30 * time.Millisecond),
MaxBackoff: model.Duration(5 * time.Second),
}
// DefaultMetadataConfig is the default metadata configuration for a remote write endpoint.
DefaultMetadataConfig = MetadataConfig{
Send: true,
SendInterval: model.Duration(1 * time.Minute),
MaxSamplesPerSend: 2000,
}
// DefaultRemoteReadConfig is the default remote read configuration.
DefaultRemoteReadConfig = RemoteReadConfig{
RemoteTimeout: model.Duration(1 * time.Minute),
ChunkedReadLimit: DefaultChunkedReadLimit,
HTTPClientConfig: config.DefaultHTTPClientConfig,
FilterExternalLabels: true,
}
// DefaultStorageConfig is the default TSDB/Exemplar storage configuration.
DefaultStorageConfig = StorageConfig{
ExemplarsConfig: &DefaultExemplarsConfig,
}
DefaultExemplarsConfig = ExemplarsConfig{
MaxExemplars: 100000,
}
// DefaultOTLPConfig is the default OTLP configuration.
DefaultOTLPConfig = OTLPConfig{
TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
// For backwards compatibility.
LabelNameUnderscoreSanitization: true,
// For backwards compatibility.
LabelNamePreserveMultipleUnderscores: true,
}
)
// Config is the top-level configuration for Prometheus's config files.
type Config struct {
GlobalConfig GlobalConfig `yaml:"global"`
Runtime RuntimeConfig `yaml:"runtime,omitempty"`
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
RuleFiles []string `yaml:"rule_files,omitempty"`
ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"`
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
StorageConfig StorageConfig `yaml:"storage,omitempty"`
TracingConfig TracingConfig `yaml:"tracing,omitempty"`
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
OTLPConfig OTLPConfig `yaml:"otlp,omitempty"`
	loaded bool // Certain methods require the configuration to have been validated via Load.
}
// SetDirectory joins any relative file paths with dir.
// This method writes to config, and it's not concurrency safe.
func (c *Config) SetDirectory(dir string) {
c.GlobalConfig.SetDirectory(dir)
c.AlertingConfig.SetDirectory(dir)
c.TracingConfig.SetDirectory(dir)
for i, file := range c.RuleFiles {
c.RuleFiles[i] = config.JoinDir(dir, file)
}
for i, file := range c.ScrapeConfigFiles {
c.ScrapeConfigFiles[i] = config.JoinDir(dir, file)
}
for _, c := range c.ScrapeConfigs {
c.SetDirectory(dir)
}
for _, c := range c.RemoteWriteConfigs {
c.SetDirectory(dir)
}
for _, c := range c.RemoteReadConfigs {
c.SetDirectory(dir)
}
}
func (c Config) String() string {
b, err := yaml.Marshal(c)
if err != nil {
return fmt.Sprintf("<error creating config string: %s>", err)
}
return string(b)
}
// GetScrapeConfigs returns the read-only, validated scrape configurations including
// the ones from the scrape_config_files.
// This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency).
// This method also assumes the Config was created by the Load or LoadFile function; it returns an
// error if it was not. We can't re-validate or apply globals here due to races;
// read more at https://github.com/prometheus/prometheus/issues/15538.
func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
if !c.loaded {
		// Programmatic error; we fail early here, before more confusing errors occur due to missing globalization.
return nil, errors.New("scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
}
scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
jobNames[scfg.JobName] = "main config file"
scfgs[i] = scfg
}
// Re-read and validate the dynamic scrape config rules.
for _, pat := range c.ScrapeConfigFiles {
fs, err := filepath.Glob(pat)
if err != nil {
// The only error can be a bad pattern.
return nil, fmt.Errorf("error retrieving scrape config files for %q: %w", pat, err)
}
for _, filename := range fs {
cfg := ScrapeConfigs{}
content, err := os.ReadFile(filename)
if err != nil {
return nil, fileErr(filename, err)
}
err = yaml.UnmarshalStrict(content, &cfg)
if err != nil {
return nil, fileErr(filename, err)
}
for _, scfg := range cfg.ScrapeConfigs {
if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, fileErr(filename, err)
}
if f, ok := jobNames[scfg.JobName]; ok {
return nil, fileErr(filename, fmt.Errorf("found multiple scrape configs with job name %q, first found in %s", scfg.JobName, f))
}
jobNames[scfg.JobName] = fmt.Sprintf("%q", filePath(filename))
scfg.SetDirectory(filepath.Dir(filename))
scfgs = append(scfgs, scfg)
}
}
}
return scfgs, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultConfig
// We want to set c to the defaults and then overwrite it with the input.
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
// again, we have to hide it using a type indirection.
type plain Config
if err := unmarshal((*plain)(c)); err != nil {
return err
}
	// If a global block was open but empty, the default global config is overwritten.
	// We have to restore it here.
if c.GlobalConfig.isZero() {
c.GlobalConfig = DefaultGlobalConfig
}
	// If a runtime block was open but empty, the default runtime config is overwritten.
	// We have to restore it here.
if c.Runtime.isZero() {
c.Runtime = DefaultRuntimeConfig
}
for _, rf := range c.RuleFiles {
if !patRulePath.MatchString(rf) {
return fmt.Errorf("invalid rule file path %q", rf)
}
}
for _, sf := range c.ScrapeConfigFiles {
if !patRulePath.MatchString(sf) {
return fmt.Errorf("invalid scrape config file path %q", sf)
}
}
// Do global overrides and validation.
jobNames := map[string]struct{}{}
for _, scfg := range c.ScrapeConfigs {
if err := scfg.Validate(c.GlobalConfig); err != nil {
return err
}
if _, ok := jobNames[scfg.JobName]; ok {
return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
}
jobNames[scfg.JobName] = struct{}{}
}
if err := c.AlertingConfig.Validate(c.GlobalConfig.MetricNameValidationScheme); err != nil {
return err
}
rwNames := map[string]struct{}{}
for _, rwcfg := range c.RemoteWriteConfigs {
if rwcfg == nil {
return errors.New("empty or null remote write config section")
}
		// Skip empty names; their name is filled in with the config hash in the remote write code.
if _, ok := rwNames[rwcfg.Name]; ok && rwcfg.Name != "" {
return fmt.Errorf("found multiple remote write configs with job name %q", rwcfg.Name)
}
if err := rwcfg.Validate(c.GlobalConfig.MetricNameValidationScheme); err != nil {
return err
}
rwNames[rwcfg.Name] = struct{}{}
}
rrNames := map[string]struct{}{}
for _, rrcfg := range c.RemoteReadConfigs {
if rrcfg == nil {
return errors.New("empty or null remote read config section")
}
		// Skip empty names; their name is filled in with the config hash in the remote read code.
if _, ok := rrNames[rrcfg.Name]; ok && rrcfg.Name != "" {
return fmt.Errorf("found multiple remote read configs with job name %q", rrcfg.Name)
}
rrNames[rrcfg.Name] = struct{}{}
}
return nil
}
// GlobalConfig configures values that are used across other configuration
// objects.
type GlobalConfig struct {
// How frequently to scrape targets by default.
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
// The default timeout when scraping targets.
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
	// The protocols to negotiate during a scrape. It tells clients which
	// protocols are accepted by Prometheus and with what weight (most preferred first).
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
// How frequently to evaluate rules by default.
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
	// Offset rule evaluation timestamps by the specified duration into the past to ensure the underlying metrics have been received.
RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
// File to which PromQL queries are logged.
QueryLogFile string `yaml:"query_log_file,omitempty"`
// File to which scrape failures are logged.
ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
// The labels to add to any timeseries that this Prometheus instance scrapes.
ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
// An uncompressed response body larger than this many bytes will cause the
// scrape to fail. 0 means no limit.
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
// More than this many samples post metric-relabeling will cause the scrape to
// fail. 0 means no limit.
SampleLimit uint `yaml:"sample_limit,omitempty"`
// More than this many targets after the target relabeling will cause the
// scrapes to fail. 0 means no limit.
TargetLimit uint `yaml:"target_limit,omitempty"`
// More than this many labels post metric-relabeling will cause the scrape to
// fail. 0 means no limit.
LabelLimit uint `yaml:"label_limit,omitempty"`
	// A label name longer than this length post metric-relabeling will cause the
	// scrape to fail. 0 means no limit.
LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
	// A label value longer than this length post metric-relabeling will cause the
	// scrape to fail. 0 means no limit.
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
// Allow UTF8 Metric and Label Names. Can be blank in config files but must
// have a value if a GlobalConfig is created programmatically.
MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
// Metric name escaping mode to request through content negotiation. Can be
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
// Whether to scrape native histograms.
ScrapeNativeHistograms *bool `yaml:"scrape_native_histograms,omitempty"`
// Whether to convert all scraped classic histograms into native histograms with custom buckets.
ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
// Whether to enable additional scrape metrics.
// When enabled, Prometheus stores samples for scrape_timeout_seconds,
// scrape_sample_limit, and scrape_body_size_bytes.
ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`
}
// ScrapeProtocol represents a supported protocol for scraping metrics.
type ScrapeProtocol string
// Validate returns an error if the given scrape protocol is not supported.
func (s ScrapeProtocol) Validate() error {
if _, ok := ScrapeProtocolsHeaders[s]; !ok {
return fmt.Errorf("unknown scrape protocol %v, supported: %v",
s, func() (ret []string) {
for k := range ScrapeProtocolsHeaders {
ret = append(ret, string(k))
}
sort.Strings(ret)
return ret
}())
}
return nil
}
// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol.
func (s ScrapeProtocol) HeaderMediaType() string {
if _, ok := ScrapeProtocolsHeaders[s]; !ok {
return ""
}
mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s])
if err != nil {
return ""
}
return mediaType
}
var (
PrometheusProto ScrapeProtocol = "PrometheusProto"
PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4"
PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0"
OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8
ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
PrometheusText0_0_4: "text/plain;version=0.0.4",
PrometheusText1_0_0: "text/plain;version=1.0.0",
OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
}
	// DefaultScrapeProtocols is the set of scrape protocols that will be proposed
	// to the scrape target, ordered by priority.
DefaultScrapeProtocols = []ScrapeProtocol{
OpenMetricsText1_0_0,
OpenMetricsText0_0_1,
PrometheusText1_0_0,
PrometheusText0_0_4,
}
// DefaultProtoFirstScrapeProtocols is like DefaultScrapeProtocols, but it
// favors protobuf Prometheus exposition format.
// Used by default by the "scrape_native_histograms" option and for certain
// feature-flags like "created-timestamp-zero-ingestion".
DefaultProtoFirstScrapeProtocols = []ScrapeProtocol{
PrometheusProto,
OpenMetricsText1_0_0,
OpenMetricsText0_0_1,
PrometheusText1_0_0,
PrometheusText0_0_4,
}
)
// validateAcceptScrapeProtocols returns an error if there are problems with the accepted scrape protocols option.
func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
if len(sps) == 0 {
return errors.New("scrape_protocols cannot be empty")
}
dups := map[string]struct{}{}
for _, sp := range sps {
if _, ok := dups[strings.ToLower(string(sp))]; ok {
return fmt.Errorf("duplicated protocol in scrape_protocols, got %v", sps)
}
if err := sp.Validate(); err != nil {
return fmt.Errorf("scrape_protocols: %w", err)
}
dups[strings.ToLower(string(sp))] = struct{}{}
}
return nil
}
// SetDirectory joins any relative file paths with dir.
func (c *GlobalConfig) SetDirectory(dir string) {
c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
// Create a clean global config as the previous one was already populated
// by the default due to the YAML parser behavior for empty blocks.
gc := &GlobalConfig{}
type plain GlobalConfig
if err := unmarshal((*plain)(gc)); err != nil {
return err
}
switch gc.MetricNameValidationScheme {
case model.UTF8Validation, model.LegacyValidation:
default:
gc.MetricNameValidationScheme = DefaultGlobalConfig.MetricNameValidationScheme
}
if err := gc.ExternalLabels.Validate(func(l labels.Label) error {
if !gc.MetricNameValidationScheme.IsValidLabelName(l.Name) {
return fmt.Errorf("%q is not a valid label name", l.Name)
}
if !model.LabelValue(l.Value).IsValid() {
return fmt.Errorf("%q is not a valid label value", l.Value)
}
return nil
}); err != nil {
return err
}
// First set the correct scrape interval, then check that the timeout
// (inferred or explicit) is not greater than that.
if gc.ScrapeInterval == 0 {
gc.ScrapeInterval = DefaultGlobalConfig.ScrapeInterval
}
if gc.ScrapeTimeout > gc.ScrapeInterval {
return errors.New("global scrape timeout greater than scrape interval")
}
if gc.ScrapeTimeout == 0 {
gc.ScrapeTimeout = min(DefaultGlobalConfig.ScrapeTimeout, gc.ScrapeInterval)
}
if gc.EvaluationInterval == 0 {
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
}
if gc.ScrapeNativeHistograms == nil {
gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms
}
if gc.ExtraScrapeMetrics == nil {
gc.ExtraScrapeMetrics = DefaultGlobalConfig.ExtraScrapeMetrics
}
if gc.ScrapeProtocols == nil {
if DefaultGlobalConfig.ScrapeProtocols != nil {
// This is the case where the defaults are set due to a feature flag.
// E.g. if the created-timestamp-zero-ingestion feature flag is
// used.
gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
}
// Otherwise, we leave ScrapeProtocols at nil for now. In the
// per-job scrape config, we have to recognize the unset case to
// correctly set the default depending on the local value of
// ScrapeNativeHistograms.
}
if gc.ScrapeProtocols != nil {
// Only validate if not-nil at this point.
if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
return fmt.Errorf("%w for global config", err)
}
}
*c = *gc
return nil
}
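// Illustrative note (not in the original source): with the defaulting above, a
// config that only sets `scrape_interval: 5s` ends up with ScrapeTimeout = 5s
// (the minimum of the 10s default and the interval), while an explicit timeout
// greater than the interval is rejected with an error.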
// isZero returns true iff the global config is the zero value.
func (c *GlobalConfig) isZero() bool {
return c.ExternalLabels.IsEmpty() &&
c.ScrapeInterval == 0 &&
c.ScrapeTimeout == 0 &&
c.EvaluationInterval == 0 &&
c.RuleQueryOffset == 0 &&
c.QueryLogFile == "" &&
c.ScrapeFailureLogFile == "" &&
c.ScrapeProtocols == nil &&
c.ScrapeNativeHistograms == nil &&
!c.ConvertClassicHistogramsToNHCB &&
!c.AlwaysScrapeClassicHistograms &&
c.BodySizeLimit == 0 &&
c.SampleLimit == 0 &&
c.TargetLimit == 0 &&
c.LabelLimit == 0 &&
c.LabelNameLengthLimit == 0 &&
c.LabelValueLengthLimit == 0 &&
c.KeepDroppedTargets == 0 &&
c.MetricNameValidationScheme == model.UnsetValidation &&
c.MetricNameEscapingScheme == "" &&
c.ExtraScrapeMetrics == nil
}
const DefaultGoGCPercentage = 75
// RuntimeConfig configures the values for the process behavior.
type RuntimeConfig struct {
// The Go garbage collection target percentage.
GoGC int `yaml:"gogc,omitempty"`
// Below are guidelines for adding a new field:
//
// For config that shouldn't change after startup, you might want to use
// flags https://prometheus.io/docs/prometheus/latest/command-line/prometheus/.
//
// Consider when the new field is first applied: at the very beginning of instance
// startup, after the TSDB is loaded etc. See https://github.com/prometheus/prometheus/pull/16491
// for an example.
//
// Provide a test covering various scenarios: empty config file, empty or incomplete runtime
// config block, precedence over other inputs (e.g., env vars, if applicable) etc.
// See TestRuntimeGOGCConfig (or https://github.com/prometheus/prometheus/pull/15238).
// The test should also verify behavior on reloads, since this config should be
// adjustable at runtime.
}
// isZero returns true iff the runtime config is the zero value.
func (c *RuntimeConfig) isZero() bool {
return c.GoGC == 0
}
type ScrapeConfigs struct {
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
}
// ScrapeConfig configures a scraping unit for Prometheus.
type ScrapeConfig struct {
// The job name to which the job label is set by default.
JobName string `yaml:"job_name"`
// Indicator whether the scraped metrics should remain unmodified.
HonorLabels bool `yaml:"honor_labels,omitempty"`
// Indicator whether the scraped timestamps should be respected.
HonorTimestamps bool `yaml:"honor_timestamps"`
// Indicator whether to track the staleness of the scraped timestamps.
TrackTimestampsStaleness bool `yaml:"track_timestamps_staleness"`
// A set of query parameters with which the target is scraped.
Params url.Values `yaml:"params,omitempty"`
// How frequently to scrape the targets of this scrape config.
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
// The timeout for scraping targets of this config.
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
	// The protocols to negotiate during a scrape. It tells clients which
	// protocols are accepted by Prometheus and with what preference (most preferred first).
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
	// The fallback protocol to use if the Content-Type sent by the target
	// is missing, blank, or not one of the expected values.
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
// Whether to scrape native histograms.
ScrapeNativeHistograms *bool `yaml:"scrape_native_histograms,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms *bool `yaml:"always_scrape_classic_histograms,omitempty"`
// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
ConvertClassicHistogramsToNHCB *bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// File to which scrape failures are logged.
ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
// The HTTP resource path on which to fetch metrics from targets.
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `yaml:"scheme,omitempty"`
// Indicator whether to request compressed response from the target.
EnableCompression bool `yaml:"enable_compression"`
// An uncompressed response body larger than this many bytes will cause the
// scrape to fail. 0 means no limit.
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
// More than this many samples post metric-relabeling will cause the scrape to
// fail. 0 means no limit.
SampleLimit uint `yaml:"sample_limit,omitempty"`
// More than this many targets after the target relabeling will cause the
// scrapes to fail. 0 means no limit.
TargetLimit uint `yaml:"target_limit,omitempty"`
// More than this many labels post metric-relabeling will cause the scrape to
// fail. 0 means no limit.
LabelLimit uint `yaml:"label_limit,omitempty"`
	// A label name longer than this length post metric-relabeling will cause the
	// scrape to fail. 0 means no limit.
LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
	// A label value longer than this length post metric-relabeling will cause the
	// scrape to fail. 0 means no limit.
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
// If there are more than this many buckets in a native histogram,
// buckets will be merged to stay within the limit.
NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
// If the growth factor of one bucket to the next is smaller than this,
// buckets will be merged to increase the factor sufficiently.
NativeHistogramMinBucketFactor float64 `yaml:"native_histogram_min_bucket_factor,omitempty"`
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
// Allow UTF8 Metric and Label Names. Can be blank in config files but must
// have a value if a ScrapeConfig is created programmatically.
MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
// Metric name escaping mode to request through content negotiation. Can be
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
// Whether to enable additional scrape metrics.
// When enabled, Prometheus stores samples for scrape_timeout_seconds,
// scrape_sample_limit, and scrape_body_size_bytes.
// If not set (nil), inherits the value from the global configuration.
ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/config/config_test.go | config/config_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"crypto/tls"
"fmt"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/alecthomas/units"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/grafana/regexp"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/aws"
"github.com/prometheus/prometheus/discovery/azure"
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/digitalocean"
"github.com/prometheus/prometheus/discovery/dns"
"github.com/prometheus/prometheus/discovery/eureka"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/hetzner"
"github.com/prometheus/prometheus/discovery/http"
"github.com/prometheus/prometheus/discovery/ionos"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/linode"
"github.com/prometheus/prometheus/discovery/marathon"
"github.com/prometheus/prometheus/discovery/moby"
"github.com/prometheus/prometheus/discovery/nomad"
"github.com/prometheus/prometheus/discovery/openstack"
"github.com/prometheus/prometheus/discovery/ovhcloud"
"github.com/prometheus/prometheus/discovery/puppetdb"
"github.com/prometheus/prometheus/discovery/scaleway"
"github.com/prometheus/prometheus/discovery/stackit"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/discovery/triton"
"github.com/prometheus/prometheus/discovery/uyuni"
"github.com/prometheus/prometheus/discovery/vultr"
"github.com/prometheus/prometheus/discovery/xds"
"github.com/prometheus/prometheus/discovery/zookeeper"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/util/testutil"
)
func mustParseURL(u string) *config.URL {
parsed, err := url.Parse(u)
if err != nil {
panic(err)
}
return &config.URL{URL: parsed}
}
const (
globBodySizeLimit = 15 * units.MiB
globSampleLimit = 1500
globTargetLimit = 30
globLabelLimit = 30
globLabelNameLengthLimit = 200
globLabelValueLengthLimit = 200
globalGoGC = 42
globScrapeFailureLogFile = "testdata/fail.log"
)
var expectedConf = &Config{
loaded: true,
GlobalConfig: GlobalConfig{
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EvaluationInterval: model.Duration(30 * time.Second),
QueryLogFile: "testdata/query.log",
ScrapeFailureLogFile: globScrapeFailureLogFile,
ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistogramsToNHCB: false,
ExtraScrapeMetrics: boolPtr(false),
MetricNameValidationScheme: model.UTF8Validation,
},
Runtime: RuntimeConfig{
GoGC: globalGoGC,
},
RuleFiles: []string{
filepath.FromSlash("testdata/first.rules"),
filepath.FromSlash("testdata/my/*.rules"),
},
RemoteWriteConfigs: []*RemoteWriteConfig{
{
URL: mustParseURL("http://remote1/push"),
ProtobufMessage: remoteapi.WriteV1MessageType,
RemoteTimeout: model.Duration(30 * time.Second),
Name: "drop_expensive",
WriteRelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__name__"},
Separator: ";",
Regex: relabel.MustNewRegexp("expensive.*"),
Replacement: "$1",
Action: relabel.Drop,
NameValidationScheme: model.UTF8Validation,
},
},
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.HTTPClientConfig{
OAuth2: &config.OAuth2{
ClientID: "123",
ClientSecret: "456",
TokenURL: "http://remote1/auth",
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
},
FollowRedirects: true,
EnableHTTP2: false,
},
},
{
URL: mustParseURL("http://remote2/push"),
ProtobufMessage: remoteapi.WriteV2MessageType,
RemoteTimeout: model.Duration(30 * time.Second),
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
Name: "rw_tls",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: false,
},
Headers: map[string]string{"name": "value"},
},
},
OTLPConfig: OTLPConfig{
PromoteResourceAttributes: []string{
"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
},
TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
LabelNameUnderscoreSanitization: true,
LabelNamePreserveMultipleUnderscores: true,
},
RemoteReadConfigs: []*RemoteReadConfig{
{
URL: mustParseURL("http://remote1/read"),
RemoteTimeout: model.Duration(1 * time.Minute),
ChunkedReadLimit: DefaultChunkedReadLimit,
ReadRecent: true,
Name: "default",
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: false,
},
FilterExternalLabels: true,
},
{
URL: mustParseURL("http://remote3/read"),
RemoteTimeout: model.Duration(1 * time.Minute),
ChunkedReadLimit: DefaultChunkedReadLimit,
ReadRecent: false,
Name: "read_special",
RequiredMatchers: model.LabelSet{"job": "special"},
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
FilterExternalLabels: true,
},
},
ScrapeConfigs: []*ScrapeConfig{
{
JobName: "prometheus",
HonorLabels: true,
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFallbackProtocol: PrometheusText0_0_4,
ScrapeFailureLogFile: "testdata/fail_prom.log",
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
CredentialsFile: filepath.FromSlash("testdata/valid_token_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
TLSConfig: config.TLSConfig{
MinVersion: config.TLSVersion(tls.VersionTLS10),
},
HTTPHeaders: &config.Headers{
Headers: map[string]config.Header{
"foo": {
Values: []string{"foobar"},
Secrets: []config.Secret{"bar", "foo"},
Files: []string{filepath.FromSlash("testdata/valid_password_file")},
},
},
},
},
ServiceDiscoveryConfigs: discovery.Configs{
&file.SDConfig{
Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"},
RefreshInterval: model.Duration(10 * time.Minute),
},
&file.SDConfig{
Files: []string{"testdata/bar/*.yaml"},
RefreshInterval: model.Duration(5 * time.Minute),
},
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
{model.AddressLabel: "localhost:9191"},
},
Labels: model.LabelSet{
"my": "label",
"your": "label",
},
Source: "0",
},
},
},
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"job", "__meta_dns_name"},
TargetLabel: "job",
Separator: ";",
Regex: relabel.MustNewRegexp("(.*)some-[regex]"),
Replacement: "foo-${1}",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"abc"},
TargetLabel: "cde",
Separator: ";",
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
TargetLabel: "abc",
Separator: ";",
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: "static",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
TargetLabel: "abc",
Separator: ";",
Regex: relabel.MustNewRegexp(""),
Replacement: "static",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"foo"},
TargetLabel: "abc",
Action: relabel.KeepEqual,
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Separator: relabel.DefaultRelabelConfig.Separator,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"foo"},
TargetLabel: "abc",
Action: relabel.DropEqual,
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Separator: relabel.DefaultRelabelConfig.Separator,
NameValidationScheme: model.UTF8Validation,
},
},
},
{
JobName: "service-x",
HonorTimestamps: true,
ScrapeInterval: model.Duration(50 * time.Second),
ScrapeTimeout: model.Duration(5 * time.Second),
EnableCompression: true,
BodySizeLimit: 10 * units.MiB,
SampleLimit: 1000,
TargetLimit: 35,
LabelLimit: 35,
LabelNameLengthLimit: 210,
LabelValueLengthLimit: 210,
ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4},
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "admin_name",
Password: "multiline\nmysecret\ntest",
},
FollowRedirects: true,
EnableHTTP2: true,
},
MetricsPath: "/my_path",
Scheme: "https",
ServiceDiscoveryConfigs: discovery.Configs{
&dns.SDConfig{
Names: []string{
"first.dns.address.domain.com",
"second.dns.address.domain.com",
},
RefreshInterval: model.Duration(15 * time.Second),
Type: "SRV",
},
&dns.SDConfig{
Names: []string{
"first.dns.address.domain.com",
},
RefreshInterval: model.Duration(30 * time.Second),
Type: "SRV",
},
},
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"job"},
Regex: relabel.MustNewRegexp("(.*)some-[regex]"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Drop,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"__address__"},
TargetLabel: "__tmp_hash",
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Modulus: 8,
Separator: ";",
Action: relabel.HashMod,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"__tmp_hash"},
Regex: relabel.MustNewRegexp("1"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Keep,
NameValidationScheme: model.UTF8Validation,
},
{
Regex: relabel.MustNewRegexp("1"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.LabelMap,
NameValidationScheme: model.UTF8Validation,
},
{
Regex: relabel.MustNewRegexp("d"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.LabelDrop,
NameValidationScheme: model.UTF8Validation,
},
{
Regex: relabel.MustNewRegexp("k"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.LabelKeep,
NameValidationScheme: model.UTF8Validation,
},
},
MetricRelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__name__"},
Regex: relabel.MustNewRegexp("expensive_metric.*"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Drop,
NameValidationScheme: model.UTF8Validation,
},
},
},
{
JobName: "service-y",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&consul.SDConfig{
Server: "localhost:1234",
PathPrefix: "/consul",
Token: "mysecret",
Services: []string{"nginx", "cache", "mysql"},
ServiceTags: []string{"canary", "v1"},
NodeMeta: map[string]string{"rack": "123"},
TagSeparator: consul.DefaultSDConfig.TagSeparator,
Scheme: "https",
RefreshInterval: consul.DefaultSDConfig.RefreshInterval,
AllowStale: true,
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
CAFile: filepath.FromSlash("testdata/valid_ca_file"),
InsecureSkipVerify: false,
},
FollowRedirects: true,
EnableHTTP2: true,
},
},
},
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__meta_sd_consul_tags"},
Regex: relabel.MustNewRegexp("label:([^=]+)=([^,]+)"),
Separator: ",",
TargetLabel: "${1}",
Replacement: "${2}",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
},
},
{
JobName: "service-z",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "mysecret",
},
FollowRedirects: true,
EnableHTTP2: true,
},
},
{
JobName: "service-kubernetes",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&kubernetes.SDConfig{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "myusername",
Password: "mysecret",
},
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
NamespaceDiscovery: kubernetes.NamespaceDiscovery{},
},
},
},
{
JobName: "service-kubernetes-namespaces",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "myusername",
PasswordFile: filepath.FromSlash("testdata/valid_password_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: discovery.Configs{
&kubernetes.SDConfig{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
NamespaceDiscovery: kubernetes.NamespaceDiscovery{
Names: []string{
"default",
},
},
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-kuma",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&xds.KumaSDConfig{
Server: "http://kuma-control-plane.kuma-system.svc:5676",
ClientID: "main-prometheus",
HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(15 * time.Second),
FetchTimeout: model.Duration(2 * time.Minute),
},
},
},
{
JobName: "service-marathon",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&marathon.SDConfig{
Servers: []string{
"https://marathon.example.com:443",
},
RefreshInterval: model.Duration(30 * time.Second),
AuthToken: "mysecret",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
},
},
},
{
JobName: "service-nomad",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&nomad.SDConfig{
AllowStale: true,
Namespace: "default",
RefreshInterval: model.Duration(60 * time.Second),
Region: "global",
Server: "http://localhost:4646",
TagSeparator: ",",
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
},
},
},
{
JobName: "service-ec2",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&aws.EC2SDConfig{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
Filters: []*aws.EC2Filter{
{
Name: "tag:environment",
Values: []string{"prod"},
},
{
Name: "tag:service",
Values: []string{"web", "db"},
},
},
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-lightsail",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&aws.LightsailSDConfig{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-azure",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | true |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/config/reload_test.go | config/reload_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func TestGenerateChecksum(t *testing.T) {
tmpDir := t.TempDir()
// Define paths for the temporary files.
yamlFilePath := filepath.Join(tmpDir, "test.yml")
ruleFile := "rule_file.yml"
ruleFilePath := filepath.Join(tmpDir, ruleFile)
scrapeConfigFile := "scrape_config.yml"
scrapeConfigFilePath := filepath.Join(tmpDir, scrapeConfigFile)
// Define initial and modified content for the files.
originalRuleContent := "groups:\n- name: example\n rules:\n - alert: ExampleAlert"
modifiedRuleContent := "groups:\n- name: example\n rules:\n - alert: ModifiedAlert"
originalScrapeConfigContent := "scrape_configs:\n- job_name: example"
modifiedScrapeConfigContent := "scrape_configs:\n- job_name: modified_example"
testCases := []struct {
name string
ruleFilePath string
scrapeConfigFilePath string
}{
{
name: "Auto reload using relative path.",
ruleFilePath: ruleFile,
scrapeConfigFilePath: scrapeConfigFile,
},
{
name: "Auto reload using absolute path.",
ruleFilePath: ruleFilePath,
scrapeConfigFilePath: scrapeConfigFilePath,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Define YAML content referencing the rule and scrape config files.
yamlContent := fmt.Sprintf(`
rule_files:
- %s
scrape_config_files:
- %s
`, tc.ruleFilePath, tc.scrapeConfigFilePath)
// Write initial content to files.
require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))
require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))
require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))
// Generate the original checksum.
originalChecksum := calculateChecksum(t, yamlFilePath)
t.Run("Rule File Change", func(t *testing.T) {
// Modify the rule file.
require.NoError(t, os.WriteFile(ruleFilePath, []byte(modifiedRuleContent), 0o644))
// Checksum should change.
modifiedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, modifiedChecksum)
// Revert the rule file.
require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Scrape Config Change", func(t *testing.T) {
// Modify the scrape config file.
require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(modifiedScrapeConfigContent), 0o644))
// Checksum should change.
modifiedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, modifiedChecksum)
// Revert the scrape config file.
require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Rule File Deletion", func(t *testing.T) {
// Delete the rule file.
require.NoError(t, os.Remove(ruleFilePath))
// Checksum should change.
deletedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, deletedChecksum)
// Restore the rule file.
require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Scrape Config Deletion", func(t *testing.T) {
// Delete the scrape config file.
require.NoError(t, os.Remove(scrapeConfigFilePath))
// Checksum should change.
deletedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, deletedChecksum)
// Restore the scrape config file.
require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Main File Change", func(t *testing.T) {
// Modify the main YAML file.
modifiedYamlContent := fmt.Sprintf(`
global:
scrape_interval: 3s
rule_files:
- %s
scrape_config_files:
- %s
`, tc.ruleFilePath, tc.scrapeConfigFilePath)
require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))
// Checksum should change.
modifiedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, modifiedChecksum)
// Revert the main YAML file.
require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Rule File Removed from YAML Config", func(t *testing.T) {
// Modify the YAML content to remove the rule file.
modifiedYamlContent := fmt.Sprintf(`
scrape_config_files:
- %s
`, tc.scrapeConfigFilePath)
require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))
// Checksum should change.
modifiedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, modifiedChecksum)
// Revert the YAML content.
require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Scrape Config Removed from YAML Config", func(t *testing.T) {
// Modify the YAML content to remove the scrape config file.
modifiedYamlContent := fmt.Sprintf(`
rule_files:
- %s
`, tc.ruleFilePath)
require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))
// Checksum should change.
modifiedChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, modifiedChecksum)
// Revert the YAML content.
require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Empty Rule File", func(t *testing.T) {
// Write an empty rule file.
require.NoError(t, os.WriteFile(ruleFilePath, []byte(""), 0o644))
// Checksum should change.
emptyChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, emptyChecksum)
// Restore the rule file.
require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
t.Run("Empty Scrape Config File", func(t *testing.T) {
// Write an empty scrape config file.
require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(""), 0o644))
// Checksum should change.
emptyChecksum := calculateChecksum(t, yamlFilePath)
require.NotEqual(t, originalChecksum, emptyChecksum)
// Restore the scrape config file.
require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))
// Checksum should return to the original.
revertedChecksum := calculateChecksum(t, yamlFilePath)
require.Equal(t, originalChecksum, revertedChecksum)
})
})
}
}
// calculateChecksum generates a checksum for the given YAML file path.
func calculateChecksum(t *testing.T, yamlFilePath string) string {
checksum, err := GenerateChecksum(yamlFilePath)
require.NoError(t, err)
require.NotEmpty(t, checksum)
return checksum
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/config/reload.go | config/reload.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
promconfig "github.com/prometheus/common/config"
"go.yaml.in/yaml/v2"
)
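// ExternalFilesConfig captures only the sections of a Prometheus configuration
// that reference external files. An illustrative config that would populate
// both fields (glob patterns are supported, see filepath.Glob below):
//
//	rule_files:
//	  - rules/*.yml
//	scrape_config_files:
//	  - scrape_configs/*.yml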
type ExternalFilesConfig struct {
RuleFiles []string `yaml:"rule_files"`
ScrapeConfigFiles []string `yaml:"scrape_config_files"`
}
// GenerateChecksum generates a checksum of the YAML file and the files it references.
func GenerateChecksum(yamlFilePath string) (string, error) {
hash := sha256.New()
yamlContent, err := os.ReadFile(yamlFilePath)
if err != nil {
return "", fmt.Errorf("error reading YAML file: %w", err)
}
_, err = hash.Write(yamlContent)
if err != nil {
return "", fmt.Errorf("error writing YAML file to hash: %w", err)
}
var config ExternalFilesConfig
if err := yaml.Unmarshal(yamlContent, &config); err != nil {
return "", fmt.Errorf("error unmarshalling YAML: %w", err)
}
dir := filepath.Dir(yamlFilePath)
for i, file := range config.RuleFiles {
config.RuleFiles[i] = promconfig.JoinDir(dir, file)
}
for i, file := range config.ScrapeConfigFiles {
config.ScrapeConfigFiles[i] = promconfig.JoinDir(dir, file)
}
files := map[string][]string{
"r": config.RuleFiles, // "r" for rule files
"s": config.ScrapeConfigFiles, // "s" for scrape config files
}
for _, prefix := range []string{"r", "s"} {
for _, pattern := range files[prefix] {
matchingFiles, err := filepath.Glob(pattern)
if err != nil {
return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err)
}
for _, file := range matchingFiles {
// Write the prefix ("r" or "s") and the file path to the hash,
// each terminated by \x00 so adjacent entries cannot collide.
_, err = hash.Write([]byte(prefix + "\x00" + file + "\x00"))
if err != nil {
return "", fmt.Errorf("error writing %q path to hash: %w", file, err)
}
// Read and hash the content of the file.
content, err := os.ReadFile(file)
if err != nil {
return "", fmt.Errorf("error reading file %s: %w", file, err)
}
_, err = hash.Write(append(content, []byte("\x00")...))
if err != nil {
return "", fmt.Errorf("error writing %q content to hash: %w", file, err)
}
}
}
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
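// A minimal usage sketch: poll GenerateChecksum and reload only when the
// result changes. lastChecksum and the reload trigger are illustrative
// placeholders, not part of this package.
//
//	checksum, err := GenerateChecksum("prometheus.yml")
//	if err != nil {
//		// handle the error
//	}
//	if checksum != lastChecksum {
//		lastChecksum = checksum
//		// trigger a configuration reload
//	}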
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/custom-sd/adapter-usage/main.go | documentation/examples/custom-sd/adapter-usage/main.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
prom_discovery "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/documentation/examples/custom-sd/adapter"
"github.com/prometheus/prometheus/util/strutil"
)
var (
a = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.")
outputFile = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String()
listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String()
logger *slog.Logger
// addressLabel is the name for the label containing a target's address.
addressLabel = model.MetaLabelPrefix + "consul_address"
// nodeLabel is the name for the label containing a target's node name.
nodeLabel = model.MetaLabelPrefix + "consul_node"
// tagsLabel is the name of the label containing the tags assigned to the target.
tagsLabel = model.MetaLabelPrefix + "consul_tags"
// serviceAddressLabel is the name of the label containing the (optional) service address.
serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
// servicePortLabel is the name of the label containing the service port.
servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
// serviceIDLabel is the name of the label containing the service ID.
serviceIDLabel = model.MetaLabelPrefix + "consul_service_id"
)
// CatalogService is copied from https://github.com/hashicorp/consul/blob/master/api/catalog.go.
// It represents one entry in the response to a /service/<service-name> request.
// Consul License: https://github.com/hashicorp/consul/blob/master/LICENSE
type CatalogService struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
NodeMeta map[string]string
ServiceID string
ServiceName string
ServiceAddress string
ServiceTags []string
ServicePort int
ServiceEnableTagOverride bool
CreateIndex uint64
ModifyIndex uint64
}
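// An illustrative (heavily trimmed) Consul catalog response that unmarshals
// into a []*CatalogService; field names follow Consul's catalog API:
//
//	[
//	  {
//	    "Node": "node-1",
//	    "Address": "10.0.0.5",
//	    "ServiceID": "web-1",
//	    "ServiceName": "web",
//	    "ServiceAddress": "10.0.0.6",
//	    "ServiceTags": ["canary"],
//	    "ServicePort": 8080
//	  }
//	]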
// Note: create a config struct for your custom SD type here.
type sdConfig struct {
Address string
TagSeparator string
RefreshInterval int
}
// Note: This is the struct with your implementation of the Discoverer interface (see Run function).
// Discovery retrieves target information from a Consul server and updates them via watches.
type discovery struct {
address string
refreshInterval int
tagSeparator string
logger *slog.Logger
oldSourceList map[string]bool
}
func (*discovery) parseServiceNodes(resp *http.Response, name string) (*targetgroup.Group, error) {
var nodes []*CatalogService
tgroup := targetgroup.Group{
Source: name,
Labels: make(model.LabelSet),
}
defer func() {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}()
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, &nodes)
if err != nil {
return &tgroup, err
}
tgroup.Targets = make([]model.LabelSet, 0, len(nodes))
for _, node := range nodes {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
tags := "," + strings.Join(node.ServiceTags, ",") + ","
// If the service address is not empty it should be used instead of the node address
// since the service may be registered remotely through a different node.
var addr string
if node.ServiceAddress != "" {
addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
} else {
addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
}
target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}
labels := model.LabelSet{
model.AddressLabel: model.LabelValue(addr),
model.LabelName(addressLabel): model.LabelValue(node.Address),
model.LabelName(nodeLabel): model.LabelValue(node.Node),
model.LabelName(tagsLabel): model.LabelValue(tags),
model.LabelName(serviceAddressLabel): model.LabelValue(node.ServiceAddress),
model.LabelName(servicePortLabel): model.LabelValue(strconv.Itoa(node.ServicePort)),
model.LabelName(serviceIDLabel): model.LabelValue(node.ServiceID),
}
tgroup.Labels = labels
// Add all key/value pairs from the node's metadata as their own labels.
for k, v := range node.NodeMeta {
name := strutil.SanitizeLabelName(k)
tgroup.Labels[model.LabelName(model.MetaLabelPrefix+name)] = model.LabelValue(v)
}
tgroup.Targets = append(tgroup.Targets, target)
}
return &tgroup, nil
}
// Note: you must implement this function for your discovery implementation as part of the
// Discoverer interface. Here you should query your SD for its list of known targets, determine
// which of those targets you care about (for example, which of Consul's known services you want
// to scrape for metrics), and then send those targets as a []*targetgroup.Group to the ch channel.
func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; {
var srvs map[string][]string
resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))
if err != nil {
d.logger.Error("Error getting services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second)
continue
}
b, err := io.ReadAll(resp.Body)
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
if err != nil {
d.logger.Error("Error reading services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second)
continue
}
err = json.Unmarshal(b, &srvs)
if err != nil {
d.logger.Error("Error parsing services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second)
continue
}
var tgs []*targetgroup.Group
// Note that we treat errors when querying specific consul services as fatal for this
// iteration of the time.Tick loop. It's better to have some stale targets than an incomplete
// list of targets simply because there may have been a timeout. If the service is actually
// gone as far as consul is concerned, that will be picked up during the next iteration of
// the outer loop.
newSourceList := make(map[string]bool)
for name := range srvs {
if name == "consul" {
continue
}
resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name))
if err != nil {
d.logger.Error("Error getting services nodes", "service", name, "err", err)
break
}
tg, err := d.parseServiceNodes(resp, name)
if err != nil {
d.logger.Error("Error parsing services nodes", "service", name, "err", err)
break
}
tgs = append(tgs, tg)
newSourceList[tg.Source] = true
}
// When targetGroup disappear, send an update with empty targetList.
for key := range d.oldSourceList {
if !newSourceList[key] {
tgs = append(tgs, &targetgroup.Group{
Source: key,
})
}
}
d.oldSourceList = newSourceList
// Send the full set of target groups, one per Consul service, downstream.
ch <- tgs
// Wait for ticker or exit when ctx is closed.
select {
case <-c:
continue
case <-ctx.Done():
return
}
}
}
func newDiscovery(conf sdConfig) (*discovery, error) {
cd := &discovery{
address: conf.Address,
refreshInterval: conf.RefreshInterval,
tagSeparator: conf.TagSeparator,
logger: logger,
oldSourceList: make(map[string]bool),
}
return cd, nil
}
func main() {
a.HelpFlag.Short('h')
_, err := a.Parse(os.Args[1:])
if err != nil {
fmt.Println("err: ", err)
return
}
logger = promslog.New(&promslog.Config{})
ctx := context.Background()
// NOTE: create an instance of your new SD implementation here.
cfg := sdConfig{
TagSeparator: ",",
Address: *listenAddress,
RefreshInterval: 30,
}
disc, err := newDiscovery(cfg)
if err != nil {
logger.Error("failed to create discovery", "err", err)
os.Exit(1)
}
reg := prometheus.NewRegistry()
refreshMetrics := prom_discovery.NewRefreshMetrics(reg)
metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics)
if err != nil {
logger.Error("failed to register service discovery metrics", "err", err)
os.Exit(1)
}
sdAdapter := adapter.NewAdapter(ctx, *outputFile, "exampleSD", disc, logger, metrics, reg)
sdAdapter.Run()
<-ctx.Done()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/custom-sd/adapter/adapter.go | documentation/examples/custom-sd/adapter/adapter.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapter
// NOTE: you do not need to edit this file when implementing a custom sd.
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"os"
"path/filepath"
"reflect"
"sort"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
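// customSD mirrors the file_sd JSON format: the output file holds an array of
// these objects, e.g. (illustrative values):
//
//	[{"targets": ["10.0.0.5:8080"], "labels": {"__meta_custom": "value"}}]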
type customSD struct {
Targets []string `json:"targets"`
Labels map[string]string `json:"labels"`
}
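// fingerprint derives a stable identifier for a target group by XOR-combining
// the fingerprints of its target label sets with the fingerprint of its group
// labels; XOR keeps the result independent of target ordering.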
func fingerprint(group *targetgroup.Group) model.Fingerprint {
groupFingerprint := model.LabelSet{}.Fingerprint()
for _, targets := range group.Targets {
groupFingerprint ^= targets.Fingerprint()
}
groupFingerprint ^= group.Labels.Fingerprint()
return groupFingerprint
}
// Adapter runs an unknown service discovery implementation and converts its target groups
// to JSON and writes to a file for file_sd.
type Adapter struct {
ctx context.Context
disc discovery.Discoverer
groups map[string]*customSD
manager *discovery.Manager
output string
name string
logger *slog.Logger
}
func mapToArray(m map[string]*customSD) []customSD {
arr := make([]customSD, 0, len(m))
for _, v := range m {
arr = append(arr, *v)
}
return arr
}
func generateTargetGroups(allTargetGroups map[string][]*targetgroup.Group) map[string]*customSD {
groups := make(map[string]*customSD)
for k, sdTargetGroups := range allTargetGroups {
for _, group := range sdTargetGroups {
newTargets := make([]string, 0)
newLabels := make(map[string]string)
for _, targets := range group.Targets {
for _, target := range targets {
newTargets = append(newTargets, string(target))
}
}
sort.Strings(newTargets)
for name, value := range group.Labels {
newLabels[string(name)] = string(value)
}
sdGroup := customSD{
Targets: newTargets,
Labels: newLabels,
}
// Make a unique key, including the group's fingerprint, in case the sd_type (map key) and group.Source are not unique.
groupFingerprint := fingerprint(group)
key := fmt.Sprintf("%s:%s:%s", k, group.Source, groupFingerprint.String())
groups[key] = &sdGroup
}
}
return groups
}
// refreshTargetGroups parses incoming target group updates. If an update changes the target
// groups the Adapter already knows about, or introduces new ones, we marshal to JSON and write
// to the output file.
func (a *Adapter) refreshTargetGroups(allTargetGroups map[string][]*targetgroup.Group) {
tempGroups := generateTargetGroups(allTargetGroups)
if !reflect.DeepEqual(a.groups, tempGroups) {
a.groups = tempGroups
err := a.writeOutput()
if err != nil {
a.logger.With("component", "sd-adapter").Error("failed to write output", "err", err)
}
}
}
// writeOutput writes the JSON-formatted targets to a temporary file and renames it over the
// output path, so file_sd readers never observe a partially written file.
func (a *Adapter) writeOutput() error {
arr := mapToArray(a.groups)
b, _ := json.MarshalIndent(arr, "", " ")
dir, _ := filepath.Split(a.output)
tmpfile, err := os.CreateTemp(dir, "sd-adapter")
if err != nil {
return err
}
defer tmpfile.Close()
_, err = tmpfile.Write(b)
if err != nil {
return err
}
// Close the file immediately for platforms (e.g. Windows) that cannot move
// a file while a process is holding a file handle.
tmpfile.Close()
err = os.Rename(tmpfile.Name(), a.output)
if err != nil {
return err
}
return nil
}
func (a *Adapter) runCustomSD(ctx context.Context) {
updates := a.manager.SyncCh()
for {
select {
case <-ctx.Done():
return
case allTargetGroups, ok := <-updates:
// Handle the case that a target provider exits and closes the channel
// before the context is done.
if !ok {
return
}
a.refreshTargetGroups(allTargetGroups)
}
}
}
// Run starts a Discovery Manager and the custom service discovery implementation.
func (a *Adapter) Run() {
//nolint:errcheck
go a.manager.Run()
a.manager.StartCustomProvider(a.ctx, a.name, a.disc)
go a.runCustomSD(a.ctx)
}
// NewAdapter creates a new instance of Adapter.
func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger *slog.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter {
return &Adapter{
ctx: ctx,
disc: d,
groups: make(map[string]*customSD),
manager: discovery.NewManager(ctx, logger, registerer, sdMetrics),
output: file,
name: name,
logger: logger,
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/custom-sd/adapter/adapter_test.go | documentation/examples/custom-sd/adapter/adapter_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapter
import (
"context"
"os"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
// TestGenerateTargetGroups checks that target groups are correctly generated.
// It covers the case when a target group is empty.
func TestGenerateTargetGroups(t *testing.T) {
testCases := []struct {
title string
targetGroup map[string][]*targetgroup.Group
expectedCustomSD map[string]*customSD
}{
{
title: "Empty targetGroup",
targetGroup: map[string][]*targetgroup.Group{
"customSD": {
{
Source: "Consul",
},
{
Source: "Kubernetes",
},
},
},
expectedCustomSD: map[string]*customSD{
"customSD:Consul:0000000000000000": {
Targets: []string{},
Labels: map[string]string{},
},
"customSD:Kubernetes:0000000000000000": {
Targets: []string{},
Labels: map[string]string{},
},
},
},
{
title: "targetGroup filled",
targetGroup: map[string][]*targetgroup.Group{
"customSD": {
{
Source: "Azure",
Targets: []model.LabelSet{
{
model.AddressLabel: "host1",
},
{
model.AddressLabel: "host2",
},
},
Labels: model.LabelSet{
model.LabelName("__meta_test_label"): model.LabelValue("label_test_1"),
},
},
{
Source: "Openshift",
Targets: []model.LabelSet{
{
model.AddressLabel: "host3",
},
{
model.AddressLabel: "host4",
},
},
Labels: model.LabelSet{
model.LabelName("__meta_test_label"): model.LabelValue("label_test_2"),
},
},
},
},
expectedCustomSD: map[string]*customSD{
"customSD:Azure:282a007a18fadbbb": {
Targets: []string{
"host1",
"host2",
},
Labels: map[string]string{
"__meta_test_label": "label_test_1",
},
},
"customSD:Openshift:281c007a18ea2ad0": {
Targets: []string{
"host3",
"host4",
},
Labels: map[string]string{
"__meta_test_label": "label_test_2",
},
},
},
},
{
title: "Mixed between empty targetGroup and targetGroup filled",
targetGroup: map[string][]*targetgroup.Group{
"customSD": {
{
Source: "GCE",
Targets: []model.LabelSet{
{
model.AddressLabel: "host1",
},
{
model.AddressLabel: "host2",
},
},
Labels: model.LabelSet{
model.LabelName("__meta_test_label"): model.LabelValue("label_test_1"),
},
},
{
Source: "Kubernetes",
Labels: model.LabelSet{
model.LabelName("__meta_test_label"): model.LabelValue("label_test_2"),
},
},
},
},
expectedCustomSD: map[string]*customSD{
"customSD:GCE:282a007a18fadbbb": {
Targets: []string{
"host1",
"host2",
},
Labels: map[string]string{
"__meta_test_label": "label_test_1",
},
},
"customSD:Kubernetes:282e007a18fad483": {
Targets: []string{},
Labels: map[string]string{
"__meta_test_label": "label_test_2",
},
},
},
},
{
title: "Disordered IPs in Alibaba's application management system",
targetGroup: map[string][]*targetgroup.Group{
"cart": {
{
Source: "alibaba",
Targets: []model.LabelSet{
{
model.AddressLabel: "192.168.1.55",
},
{
model.AddressLabel: "192.168.1.44",
},
},
Labels: model.LabelSet{
model.LabelName("__meta_test_label"): model.LabelValue("label_test_1"),
},
},
},
"buy": {
{
Source: "alibaba",
Targets: []model.LabelSet{
{
model.AddressLabel: "192.168.1.22",
},
{
model.AddressLabel: "192.168.1.33",
},
},
Labels: model.LabelSet{
model.LabelName("__meta_test_label"): model.LabelValue("label_test_1"),
},
},
},
},
expectedCustomSD: map[string]*customSD{
"buy:alibaba:21c0d97a1e27e6fe": {
Targets: []string{
"192.168.1.22",
"192.168.1.33",
},
Labels: map[string]string{
"__meta_test_label": "label_test_1",
},
},
"cart:alibaba:1112e97a13b159fa": {
Targets: []string{
"192.168.1.44",
"192.168.1.55",
},
Labels: map[string]string{
"__meta_test_label": "label_test_1",
},
},
},
},
}
for _, testCase := range testCases {
result := generateTargetGroups(testCase.targetGroup)
require.Equal(t, testCase.expectedCustomSD, result)
}
}
// TestWriteOutput checks the adapter can write a file to disk.
func TestWriteOutput(t *testing.T) {
ctx := context.Background()
tmpfile, err := os.CreateTemp("", "sd_adapter_test")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
require.NoError(t, tmpfile.Close())
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics)
require.NoError(t, err)
adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil, sdMetrics, reg)
require.NoError(t, adapter.writeOutput())
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/example_write_adapter/server.go | documentation/examples/remote_storage/example_write_adapter/server.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"log"
"net/http"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage/remote"
)
func main() {
http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
enc := r.Header.Get("Content-Encoding")
if enc == "" {
http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
return
}
if enc != "snappy" {
http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
return
}
contentType := r.Header.Get("Content-Type")
if contentType == "" {
http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
}
defer func() { _ = r.Body.Close() }()
// Very simplistic content type parsing; see
// storage/remote/write_handler.go#WriteHandler.ServeHTTP for a production example.
switch contentType {
case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest":
req, err := remote.DecodeWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
printV1(req)
case "application/x-protobuf;proto=io.prometheus.write.v2.Request":
req, err := remote.DecodeWriteV2Request(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = printV2(req)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
default:
msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
fmt.Println(msg)
http.Error(w, msg, http.StatusBadRequest)
}
})
log.Fatal(http.ListenAndServe(":1234", nil))
}
func printV1(req *prompb.WriteRequest) {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
fmt.Println(ts.ToLabels(&b, nil))
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil)
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
continue
}
h := hp.ToIntHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
}
func printV2(req *writev2.Request) error {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
l, err := ts.ToLabels(&b, req.Symbols)
if err != nil {
return err
}
m := ts.ToMetadata(req.Symbols)
fmt.Println(l, m)
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, ep := range ts.Exemplars {
e, err := ep.ToExemplar(&b, req.Symbols)
if err != nil {
return err
}
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
continue
}
h := hp.ToIntHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
return nil
}
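// A Prometheus remote_write section pointing at this receiver might look like
// the following (illustrative); protobuf_message selects between the v1 and
// v2 request types handled in main above:
//
//	remote_write:
//	  - url: "http://localhost:1234/receive"
//	    protobuf_message: io.prometheus.write.v2.Request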
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/main.go | documentation/examples/remote_storage/remote_storage_adapter/main.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The main package for the remote storage adapter executable.
package main
import (
"fmt"
"io"
"log/slog"
"net/http"
_ "net/http/pprof"
"net/url"
"os"
"path/filepath"
"sync"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/promslog/flag"
"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite"
"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb"
"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/opentsdb"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote"
)
type config struct {
graphiteAddress string
graphiteTransport string
graphitePrefix string
opentsdbURL string
influxdbURL string
bucket string
organization string
influxdbAuthToken string
remoteTimeout time.Duration
listenAddr string
telemetryPath string
promslogConfig promslog.Config
}
var (
receivedSamples = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "received_samples_total",
Help: "Total number of received samples.",
},
)
sentSamples = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "sent_samples_total",
Help: "Total number of processed samples sent to remote storage.",
},
[]string{"remote"},
)
failedSamples = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "failed_samples_total",
Help: "Total number of processed samples which failed on send to remote storage.",
},
[]string{"remote"},
)
sentBatchDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "sent_batch_duration_seconds",
Help: "Duration of sample batch send calls to the remote storage.",
Buckets: prometheus.DefBuckets,
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
},
[]string{"remote"},
)
)
func init() {
prometheus.MustRegister(receivedSamples)
prometheus.MustRegister(sentSamples)
prometheus.MustRegister(failedSamples)
prometheus.MustRegister(sentBatchDuration)
}
func main() {
cfg := parseFlags()
http.Handle(cfg.telemetryPath, promhttp.Handler())
logger := promslog.New(&cfg.promslogConfig)
writers, readers := buildClients(logger, cfg)
if err := serve(logger, cfg.listenAddr, writers, readers); err != nil {
logger.Error("Failed to listen", "addr", cfg.listenAddr, "err", err)
os.Exit(1)
}
}
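// parseFlags builds the adapter configuration from command line flags. An
// illustrative invocation wiring the adapter to a local InfluxDB (the URL,
// organization, and bucket values are placeholders):
//
//	remote_storage_adapter \
//	  --influxdb-url=http://localhost:8086/ \
//	  --influxdb.organization=example-org \
//	  --influxdb.bucket=prometheus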
func parseFlags() *config {
a := kingpin.New(filepath.Base(os.Args[0]), "Remote storage adapter")
a.HelpFlag.Short('h')
cfg := &config{
influxdbAuthToken: os.Getenv("INFLUXDB_AUTH_TOKEN"),
promslogConfig: promslog.Config{},
}
a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty.").
Default("").StringVar(&cfg.graphiteAddress)
a.Flag("graphite-transport", "Transport protocol to use to communicate with Graphite. 'tcp', if empty.").
Default("tcp").StringVar(&cfg.graphiteTransport)
a.Flag("graphite-prefix", "The prefix to prepend to all metrics exported to Graphite. None, if empty.").
Default("").StringVar(&cfg.graphitePrefix)
a.Flag("opentsdb-url", "The URL of the remote OpenTSDB server to send samples to. None, if empty.").
Default("").StringVar(&cfg.opentsdbURL)
a.Flag("influxdb-url", "The URL of the remote InfluxDB server to send samples to. None, if empty.").
Default("").StringVar(&cfg.influxdbURL)
a.Flag("influxdb.bucket", "The InfluxDB bucket to use.").
Default("").StringVar(&cfg.bucket)
a.Flag("influxdb.organization", "The name of the organization to use for storing samples in InfluxDB.").
Default("").StringVar(&cfg.organization)
a.Flag("send-timeout", "The timeout to use when sending samples to the remote storage.").
Default("30s").DurationVar(&cfg.remoteTimeout)
a.Flag("web.listen-address", "Address to listen on for web endpoints.").
Default(":9201").StringVar(&cfg.listenAddr)
a.Flag("web.telemetry-path", "Address to listen on for web endpoints.").
Default("/metrics").StringVar(&cfg.telemetryPath)
flag.AddFlags(a, &cfg.promslogConfig)
_, err := a.Parse(os.Args[1:])
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s", err)
a.Usage(os.Args[1:])
os.Exit(2)
}
return cfg
}
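// A minimal invocation sketch (illustrative, not part of the original file),
// combining the flags defined above; all endpoint values are placeholders:
//
//	./remote_storage_adapter \
//	  --influxdb-url=http://localhost:8086/ \
//	  --influxdb.organization=myorg --influxdb.bucket=mybucket \
//	  --graphite-address=localhost:2003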
type writer interface {
Write(samples model.Samples) error
Name() string
}
type reader interface {
Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error)
Name() string
}
func buildClients(logger *slog.Logger, cfg *config) ([]writer, []reader) {
var writers []writer
var readers []reader
if cfg.graphiteAddress != "" {
c := graphite.NewClient(
logger.With("storage", "Graphite"),
cfg.graphiteAddress, cfg.graphiteTransport,
cfg.remoteTimeout, cfg.graphitePrefix)
writers = append(writers, c)
}
if cfg.opentsdbURL != "" {
c := opentsdb.NewClient(
logger.With("storage", "OpenTSDB"),
cfg.opentsdbURL,
cfg.remoteTimeout,
)
writers = append(writers, c)
}
if cfg.influxdbURL != "" {
url, err := url.Parse(cfg.influxdbURL)
if err != nil {
logger.Error("Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err)
os.Exit(1)
}
c := influxdb.NewClient(
logger.With("storage", "InfluxDB"),
url.String(),
cfg.influxdbAuthToken,
cfg.organization,
cfg.bucket,
)
prometheus.MustRegister(c)
writers = append(writers, c)
readers = append(readers, c)
}
logger.Info("Starting up...")
return writers, readers
}
func serve(logger *slog.Logger, addr string, writers []writer, readers []reader) error {
http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeWriteRequest(r.Body)
if err != nil {
logger.Error("Read error", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
samples := protoToSamples(req)
receivedSamples.Add(float64(len(samples)))
var wg sync.WaitGroup
for _, w := range writers {
wg.Add(1)
go func(rw writer) {
sendSamples(logger, rw, samples)
wg.Done()
}(w)
}
wg.Wait()
})
http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) {
compressed, err := io.ReadAll(r.Body)
if err != nil {
logger.Error("Read error", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
logger.Error("Decode error", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var req prompb.ReadRequest
if err := proto.Unmarshal(reqBuf, &req); err != nil {
logger.Error("Unmarshal error", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// TODO: Support reading from more than one reader and merging the results.
if len(readers) != 1 {
http.Error(w, fmt.Sprintf("expected exactly one reader, found %d readers", len(readers)), http.StatusInternalServerError)
return
}
reader := readers[0]
var resp *prompb.ReadResponse
resp, err = reader.Read(&req)
if err != nil {
logger.Warn("Error executing query", "query", req, "storage", reader.Name(), "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data, err := proto.Marshal(resp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/x-protobuf")
w.Header().Set("Content-Encoding", "snappy")
compressed = snappy.Encode(nil, data)
if _, err := w.Write(compressed); err != nil {
logger.Warn("Error writing response", "storage", reader.Name(), "err", err)
}
})
return http.ListenAndServe(addr, nil)
}
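// Sketch (illustrative, not part of the original file): a Prometheus server
// is pointed at this adapter via its remote read/write configuration, using
// the /write and /read handlers registered above:
//
//	remote_write:
//	  - url: "http://localhost:9201/write"
//	remote_read:
//	  - url: "http://localhost:9201/read"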
func protoToSamples(req *prompb.WriteRequest) model.Samples {
var samples model.Samples
for _, ts := range req.Timeseries {
metric := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
for _, s := range ts.Samples {
samples = append(samples, &model.Sample{
Metric: metric,
Value: model.SampleValue(s.Value),
Timestamp: model.Time(s.Timestamp),
})
}
}
return samples
}
func sendSamples(logger *slog.Logger, w writer, samples model.Samples) {
begin := time.Now()
err := w.Write(samples)
duration := time.Since(begin).Seconds()
if err != nil {
logger.Warn("Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples))
failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples)))
}
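// Note that sent_samples_total is incremented even when the write failed, so
// it counts attempted samples; failures are additionally tracked in
// failed_samples_total above.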
sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples)))
sentBatchDuration.WithLabelValues(w.Name()).Observe(duration)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go | documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package influxdb
import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"strings"
"time"
influx "github.com/influxdata/influxdb-client-go/v2"
"github.com/influxdata/influxdb-client-go/v2/api/query"
"github.com/influxdata/influxdb-client-go/v2/api/write"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/prompb"
)
// Client allows sending batches of Prometheus samples to InfluxDB.
type Client struct {
logger *slog.Logger
client influx.Client
organization string
bucket string
ignoredSamples prometheus.Counter
context context.Context
}
// NewClient creates a new Client.
func NewClient(logger *slog.Logger, url, authToken, organization, bucket string) *Client {
c := influx.NewClientWithOptions(
url,
authToken,
influx.DefaultOptions().SetPrecision(time.Millisecond),
)
if logger == nil {
logger = promslog.NewNopLogger()
}
return &Client{
logger: logger,
client: c,
organization: organization,
bucket: bucket,
ignoredSamples: prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_influxdb_ignored_samples_total",
Help: "The total number of samples not sent to InfluxDB due to unsupported float values (Inf, -Inf, NaN).",
},
),
context: context.Background(),
}
}
// tagsFromMetric extracts InfluxDB tags from a Prometheus metric.
func tagsFromMetric(m model.Metric) map[string]string {
tags := make(map[string]string, len(m)-1)
for l, v := range m {
if l != model.MetricNameLabel {
tags[string(l)] = string(v)
}
}
return tags
}
// Write sends a batch of samples to InfluxDB via its HTTP API.
func (c *Client) Write(samples model.Samples) error {
points := make([]*write.Point, 0, len(samples))
for _, s := range samples {
v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) {
c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s)
c.ignoredSamples.Inc()
continue
}
p := influx.NewPoint(
string(s.Metric[model.MetricNameLabel]),
tagsFromMetric(s.Metric),
map[string]any{"value": v},
s.Timestamp.Time(),
)
points = append(points, p)
}
writeAPI := c.client.WriteAPIBlocking(c.organization, c.bucket)
writeAPI.EnableBatching() // The client's default batch size is 5,000 points.
var err error
for _, p := range points {
if err = writeAPI.WritePoint(c.context, p); err != nil {
return err
}
}
if err = writeAPI.Flush(c.context); err != nil {
return err
}
return nil
}
func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) {
queryAPI := c.client.QueryAPI(c.organization)
labelsToSeries := map[string]*prompb.TimeSeries{}
for _, q := range req.Queries {
command, err := c.buildCommand(q)
if err != nil {
return nil, err
}
resp, err := queryAPI.Query(c.context, command)
if err != nil {
return nil, err
}
if resp.Err() != nil {
return nil, resp.Err()
}
for resp.Next() {
if err = mergeResult(labelsToSeries, resp.Record()); err != nil {
return nil, err
}
}
}
resp := prompb.ReadResponse{
Results: []*prompb.QueryResult{
{Timeseries: make([]*prompb.TimeSeries, 0, len(labelsToSeries))},
},
}
for _, ts := range labelsToSeries {
resp.Results[0].Timeseries = append(resp.Results[0].Timeseries, ts)
}
return &resp, nil
}
func (c *Client) buildCommand(q *prompb.Query) (string, error) {
rangeInNs := fmt.Sprintf("start: time(v: %v), stop: time(v: %v)", q.StartTimestampMs*time.Millisecond.Nanoseconds(), q.EndTimestampMs*time.Millisecond.Nanoseconds())
// If we don't find a metric name matcher, query all metrics
// (InfluxDB measurements) by default.
var measurement strings.Builder
measurement.WriteString(`r._measurement`)
matchers := make([]string, 0, len(q.Matchers))
var joinedMatchers string
for _, m := range q.Matchers {
if m.Name == model.MetricNameLabel {
switch m.Type {
case prompb.LabelMatcher_EQ:
measurement.WriteString(fmt.Sprintf(" == \"%s\"", m.Value))
case prompb.LabelMatcher_RE:
measurement.WriteString(fmt.Sprintf(" =~ /%s/", escapeSlashes(m.Value)))
default:
// TODO: Figure out how to support these efficiently.
return "", errors.New("non-equal or regex-non-equal matchers are not supported on the metric name yet")
}
continue
}
switch m.Type {
case prompb.LabelMatcher_EQ:
matchers = append(matchers, fmt.Sprintf("r.%s == \"%s\"", m.Name, escapeSingleQuotes(m.Value)))
case prompb.LabelMatcher_NEQ:
matchers = append(matchers, fmt.Sprintf("r.%s != \"%s\"", m.Name, escapeSingleQuotes(m.Value)))
case prompb.LabelMatcher_RE:
matchers = append(matchers, fmt.Sprintf("r.%s =~ /%s/", m.Name, escapeSingleQuotes(m.Value)))
case prompb.LabelMatcher_NRE:
matchers = append(matchers, fmt.Sprintf("r.%s !~ /%s/", m.Name, escapeSingleQuotes(m.Value)))
default:
return "", fmt.Errorf("unknown match type %v", m.Type)
}
}
if len(matchers) > 0 {
joinedMatchers = fmt.Sprintf(" and %s", strings.Join(matchers, " and "))
}
// _measurement must be retained, otherwise an "invalid metric name" error will be returned.
command := fmt.Sprintf(
"from(bucket: \"%s\") |> range(%s) |> filter(fn: (r) => %s%s)",
c.bucket, rangeInNs, measurement.String(), joinedMatchers,
)
return command, nil
}
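// Illustrative output (not part of the original file): for a client
// configured with bucket "mybucket" and a query with start=1000ms,
// end=2000ms and matchers {__name__="cpu", job=~"prom.*"}, buildCommand
// yields a Flux pipeline along the lines of:
//
//	from(bucket: "mybucket") |> range(start: time(v: 1000000000), stop: time(v: 2000000000)) |> filter(fn: (r) => r._measurement == "cpu" and r.job =~ /prom.*/)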
func escapeSingleQuotes(str string) string {
return strings.ReplaceAll(str, `'`, `\'`)
}
func escapeSlashes(str string) string {
return strings.ReplaceAll(str, `/`, `\/`)
}
func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, record *query.FluxRecord) error {
builtIntime := record.Time()
builtInvalue := record.Value()
builtInMeasurement := record.Measurement()
labels := record.Values()
filterOutBuiltInLabels(labels)
k := concatLabels(labels)
ts, ok := labelsToSeries[k]
if !ok {
ts = &prompb.TimeSeries{
Labels: tagsToLabelPairs(builtInMeasurement, labels),
}
labelsToSeries[k] = ts
}
sample, err := valuesToSamples(builtIntime, builtInvalue)
if err != nil {
return err
}
ts.Samples = mergeSamples(ts.Samples, []prompb.Sample{sample})
return nil
}
func filterOutBuiltInLabels(labels map[string]any) {
delete(labels, "table")
delete(labels, "_start")
delete(labels, "_stop")
delete(labels, "_time")
delete(labels, "_value")
delete(labels, "_field")
delete(labels, "result")
delete(labels, "_measurement")
}
func concatLabels(labels map[string]any) string {
// 0xff cannot occur in valid UTF-8 sequences, so use it
// as a separator here.
separator := "\xff"
pairs := make([]string, 0, len(labels))
for k, v := range labels {
pairs = append(pairs, fmt.Sprintf("%s%s%v", k, separator, v))
}
return strings.Join(pairs, separator)
}
func tagsToLabelPairs(name string, tags map[string]any) []prompb.Label {
pairs := make([]prompb.Label, 0, len(tags))
for k, v := range tags {
if v == nil {
// If we select metrics with different sets of labels names,
// InfluxDB returns *all* possible tag names on all returned
// series, with empty tag values on series where they don't
// apply. In Prometheus, an empty label value is equivalent
// to a non-existent label, so we just skip empty ones here
// to make the result correct.
continue
}
pairs = append(pairs, prompb.Label{
Name: k,
Value: fmt.Sprintf("%v", v),
})
}
pairs = append(pairs, prompb.Label{
Name: model.MetricNameLabel,
Value: name,
})
return pairs
}
func valuesToSamples(timestamp time.Time, value any) (prompb.Sample, error) {
var valueFloat64 float64
var valueInt64 int64
var ok bool
if valueFloat64, ok = value.(float64); !ok {
valueInt64, ok = value.(int64)
if !ok {
return prompb.Sample{}, fmt.Errorf("unable to convert sample value to float64: %v", value)
}
valueFloat64 = float64(valueInt64)
}
return prompb.Sample{
Timestamp: timestamp.UnixMilli(),
Value: valueFloat64,
}, nil
}
// mergeSamples merges two lists of sample pairs and removes duplicate
// timestamps. It assumes that both lists are sorted by timestamp.
func mergeSamples(a, b []prompb.Sample) []prompb.Sample {
result := make([]prompb.Sample, 0, len(a)+len(b))
i, j := 0, 0
for i < len(a) && j < len(b) {
switch {
case a[i].Timestamp < b[j].Timestamp:
result = append(result, a[i])
i++
case a[i].Timestamp > b[j].Timestamp:
result = append(result, b[j])
j++
default:
result = append(result, a[i])
i++
j++
}
}
result = append(result, a[i:]...)
result = append(result, b[j:]...)
return result
}
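// Example (illustrative, not part of the original file): merging two sorted,
// partially overlapping sample lists.
//
//	a := []prompb.Sample{{Timestamp: 1}, {Timestamp: 3}}
//	b := []prompb.Sample{{Timestamp: 2}, {Timestamp: 3}}
//	merged := mergeSamples(a, b) // timestamps 1, 2, 3; the duplicate 3 is kept once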
// Name identifies the client as an InfluxDB client.
func (Client) Name() string {
return "influxdb"
}
// Describe implements prometheus.Collector.
func (c *Client) Describe(ch chan<- *prometheus.Desc) {
ch <- c.ignoredSamples.Desc()
}
// Collect implements prometheus.Collector.
func (c *Client) Collect(ch chan<- prometheus.Metric) {
ch <- c.ignoredSamples
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go | documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package influxdb
import (
"io"
"math"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
func TestClient(t *testing.T) {
samples := model.Samples{
{
Metric: model.Metric{
model.MetricNameLabel: "testmetric",
"test_label": "test_label_value1",
},
Timestamp: model.Time(123456789123),
Value: 1.23,
},
{
Metric: model.Metric{
model.MetricNameLabel: "testmetric",
"test_label": "test_label_value2",
},
Timestamp: model.Time(123456789123),
Value: 5.1234,
},
{
Metric: model.Metric{
model.MetricNameLabel: "nan_value",
},
Timestamp: model.Time(123456789123),
Value: model.SampleValue(math.NaN()),
},
{
Metric: model.Metric{
model.MetricNameLabel: "pos_inf_value",
},
Timestamp: model.Time(123456789123),
Value: model.SampleValue(math.Inf(1)),
},
{
Metric: model.Metric{
model.MetricNameLabel: "neg_inf_value",
},
Timestamp: model.Time(123456789123),
Value: model.SampleValue(math.Inf(-1)),
},
}
expectedBody := `testmetric,test_label=test_label_value1 value=1.23 123456789123
testmetric,test_label=test_label_value2 value=5.1234 123456789123
`
server := httptest.NewServer(http.HandlerFunc(
func(_ http.ResponseWriter, r *http.Request) {
require.Equal(t, http.MethodPost, r.Method, "Unexpected method.")
require.Equal(t, "/api/v2/write", r.URL.Path, "Unexpected path.")
b, err := io.ReadAll(r.Body)
require.NoError(t, err, "Error reading body.")
require.Equal(t, expectedBody, string(b), "Unexpected request body.")
},
))
defer server.Close()
serverURL, err := url.Parse(server.URL)
require.NoError(t, err, "Unable to parse server URL.")
c := NewClient(nil, serverURL.String(), "auth_token", "test_organization", "test_bucket")
err = c.Write(samples)
require.NoError(t, err, "Error sending samples.")
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go | documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graphite
import (
"bytes"
"fmt"
"log/slog"
"math"
"net"
"sort"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
)
// Client allows sending batches of Prometheus samples to Graphite.
type Client struct {
logger *slog.Logger
address string
transport string
timeout time.Duration
prefix string
}
// NewClient creates a new Client.
func NewClient(logger *slog.Logger, address, transport string, timeout time.Duration, prefix string) *Client {
if logger == nil {
logger = promslog.NewNopLogger()
}
return &Client{
logger: logger,
address: address,
transport: transport,
timeout: timeout,
prefix: prefix,
}
}
func pathFromMetric(m model.Metric, prefix string) string {
var buffer bytes.Buffer
buffer.WriteString(prefix)
buffer.WriteString(escape(m[model.MetricNameLabel]))
// We want to sort the labels.
labels := make(model.LabelNames, 0, len(m))
for l := range m {
labels = append(labels, l)
}
sort.Sort(labels)
// For each label, in order, add ".<label>.<value>".
for _, l := range labels {
v := m[l]
if l == model.MetricNameLabel || len(l) == 0 {
continue
}
// Since we use '.' instead of '=' to separate label names and values,
// we can't have a '.' in the metric name. Fortunately, this is
// prohibited in Prometheus metric names.
buffer.WriteString(fmt.Sprintf(
".%s.%s", string(l), escape(v)))
}
return buffer.String()
}
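// Illustrative result (not part of the original file): for the metric
// {__name__="http_requests_total", job="api"} and prefix "prefix.",
// pathFromMetric returns "prefix.http_requests_total.job.api".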
// Write sends a batch of samples to Graphite.
func (c *Client) Write(samples model.Samples) error {
conn, err := net.DialTimeout(c.transport, c.address, c.timeout)
if err != nil {
return err
}
defer conn.Close()
var buf bytes.Buffer
for _, s := range samples {
k := pathFromMetric(s.Metric, c.prefix)
t := float64(s.Timestamp.UnixNano()) / 1e9
v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) {
c.logger.Debug("Cannot send value to Graphite, skipping sample", "value", v, "sample", s)
continue
}
fmt.Fprintf(&buf, "%s %f %f\n", k, v, t)
}
_, err = conn.Write(buf.Bytes())
return err
}
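// Illustrative wire format (not part of the original file): each sample
// becomes one plaintext-protocol line of "<path> <value> <timestamp>", e.g.
//
//	prefix.http_requests_total.job.api 1.230000 123456789.123000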
// Name identifies the client as a Graphite client.
func (Client) Name() string {
return "graphite"
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go | documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graphite
import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
var metric = model.Metric{
model.MetricNameLabel: "test:metric",
"testlabel": "test:value",
"many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
}
func TestEscape(t *testing.T) {
// Can we correctly keep and escape valid chars.
value := "abzABZ019(){},'\"\\"
expected := "abzABZ019\\(\\)\\{\\}\\,\\'\\\"\\\\"
actual := escape(model.LabelValue(value))
require.Equal(t, expected, actual)
// Test percent-encoding.
value = "é/|_;:%."
expected = "%C3%A9%2F|_;:%25%2E"
actual = escape(model.LabelValue(value))
require.Equal(t, expected, actual)
}
func TestPathFromMetric(t *testing.T) {
expected := ("prefix." +
"test:metric" +
".many_chars.abc!ABC:012-3!45%C3%B667~89%2E%2F\\(\\)\\{\\}\\,%3D%2E\\\"\\\\" +
".testlabel.test:value")
actual := pathFromMetric(metric, "prefix.")
require.Equal(t, expected, actual)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go | documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graphite
import (
"bytes"
"fmt"
"strings"
"github.com/prometheus/common/model"
)
const (
// From https://github.com/graphite-project/graphite-web/blob/master/webapp/graphite/render/grammar.py#L83
symbols = "(){},=.'\"\\"
printables = ("0123456789abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~")
)
// Graphite doesn't support tags, so label names and values must be
// encoded into the metric path. The list of characters that are usable
// with Graphite is rather fuzzy. One 'source of truth' might be the grammar
// used to parse requests in the webapp:
// https://github.com/graphite-project/graphite-web/blob/master/webapp/graphite/render/grammar.py#L83
// The list of valid symbols is defined as:
// legal = printables - symbols + escaped(symbols)
//
// The default storage backend for Graphite (whisper) stores data
// in filenames, so we also need to use only valid filename characters.
// Fortunately, on UNIX only '/' is invalid in filenames, and Windows is completely unsupported
// by Graphite: http://graphite.readthedocs.org/en/latest/install.html#windows-users
// escape escapes a model.LabelValue into runes allowed in Graphite. The runes
// allowed in Graphite are all single-byte. This function encodes the arbitrary
// byte sequence underlying the label value in a way very similar to traditional
// percent-encoding (https://en.wikipedia.org/wiki/Percent-encoding):
//
// - The string that underlies the label value is scanned byte by byte.
//
// - If a byte represents a legal Graphite rune with the exception of '%', '/',
// '=' and '.', that byte is directly copied to the resulting byte slice.
// % is used for percent-encoding of other bytes.
// / is not usable in filenames.
// = is used when generating the path to associate values to labels.
// . already means something for Graphite and thus can't be used in a value.
//
// - If the byte is any of (){},=.'"\, then a '\' will be prepended to it. We
// do not percent-encode them since they are explicitly usable in this
// way in Graphite.
//
// - All other bytes are replaced by '%' followed by two bytes containing the
// uppercase ASCII representation of their hexadecimal value.
//
// This encoding allows saving arbitrary Go strings in Graphite. That's
// required because Prometheus label values can contain anything. Using
// percent-encoding makes it easy to unescape, even in JavaScript.
//
// Examples:
//
// "foo-bar-42" -> "foo-bar-42"
//
// "foo_bar%42" -> "foo_bar%2542"
//
// "http://example.org:8080" -> "http:%2F%2Fexample%2Eorg:8080"
//
// "Björn's email: bjoern@soundcloud.com" ->
// "Bj%C3%B6rn's%20email:%20bjoern%40soundcloud.com"
//
// "日" -> "%E6%97%A5"
func escape(tv model.LabelValue) string {
length := len(tv)
result := bytes.NewBuffer(make([]byte, 0, length))
for i := range length {
b := tv[i]
switch {
// . is reserved by graphite, % is used to escape other bytes.
case b == '.' || b == '%' || b == '/' || b == '=':
fmt.Fprintf(result, "%%%X", b)
// These symbols are ok only if backslash escaped.
case strings.IndexByte(symbols, b) != -1:
result.WriteString("\\" + string(b))
// These are all fine.
case strings.IndexByte(printables, b) != -1:
result.WriteByte(b)
// Defaults to percent-encoding.
default:
fmt.Fprintf(result, "%%%X", b)
}
}
return result.String()
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go | documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentsdb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"math"
"net/http"
"net/url"
"time"
"github.com/prometheus/common/model"
)
const (
putEndpoint = "/api/put"
contentTypeJSON = "application/json"
)
// Client allows sending batches of Prometheus samples to OpenTSDB.
type Client struct {
logger *slog.Logger
url string
timeout time.Duration
}
// NewClient creates a new Client.
func NewClient(logger *slog.Logger, url string, timeout time.Duration) *Client {
return &Client{
logger: logger,
url: url,
timeout: timeout,
}
}
// StoreSamplesRequest is used for building a JSON request for storing samples
// via the OpenTSDB HTTP API.
type StoreSamplesRequest struct {
Metric TagValue `json:"metric"`
Timestamp int64 `json:"timestamp"`
Value float64 `json:"value"`
Tags map[string]TagValue `json:"tags"`
}
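// Illustrative payload (not part of the original file): a single request
// marshals to JSON along these lines, with TagValue escaping applied
// (':' becomes '_.'):
//
//	{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"testlabel":"test_.value"}}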
// tagsFromMetric translates a Prometheus metric into OpenTSDB tags.
func tagsFromMetric(m model.Metric) map[string]TagValue {
tags := make(map[string]TagValue, len(m)-1)
for l, v := range m {
if l == model.MetricNameLabel {
continue
}
tags[string(l)] = TagValue(v)
}
return tags
}
// Write sends a batch of samples to OpenTSDB via its HTTP API.
func (c *Client) Write(samples model.Samples) error {
reqs := make([]StoreSamplesRequest, 0, len(samples))
for _, s := range samples {
v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) {
c.logger.Debug("Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
continue
}
metric := TagValue(s.Metric[model.MetricNameLabel])
reqs = append(reqs, StoreSamplesRequest{
Metric: metric,
Timestamp: s.Timestamp.Unix(),
Value: v,
Tags: tagsFromMetric(s.Metric),
})
}
u, err := url.Parse(c.url)
if err != nil {
return err
}
u.Path = putEndpoint
buf, err := json.Marshal(reqs)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewBuffer(buf))
if err != nil {
return err
}
req.Header.Set("Content-Type", contentTypeJSON)
resp, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return err
}
defer func() {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}()
// API returns status code 204 for successful writes.
// http://opentsdb.net/docs/build/html/api_http/put.html
if resp.StatusCode == http.StatusNoContent {
return nil
}
// API returns status code 400 on error, encoding error details in the
// response content in JSON.
buf, err = io.ReadAll(resp.Body)
if err != nil {
return err
}
var r map[string]int
if err := json.Unmarshal(buf, &r); err != nil {
return err
}
return fmt.Errorf("failed to write %d samples to OpenTSDB, %d succeeded", r["failed"], r["success"])
}
// Name identifies the client as an OpenTSDB client.
func (Client) Name() string {
return "opentsdb"
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go | documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentsdb
import (
"bytes"
"fmt"
"github.com/prometheus/common/model"
)
// TagValue is a model.LabelValue that implements json.Marshaler and
// json.Unmarshaler. These implementations avoid characters illegal in
// OpenTSDB. See the MarshalJSON for details. TagValue is used for the values of
// OpenTSDB tags as well as for OpenTSDB metric names.
type TagValue model.LabelValue
// MarshalJSON marshals this TagValue into JSON that only contains runes allowed
// in OpenTSDB. It implements json.Marshaler. The runes allowed in OpenTSDB are
// all single-byte. This function encodes the arbitrary byte sequence found in
// this TagValue in the following way:
//
// - The string that underlies TagValue is scanned byte by byte.
//
// - If a byte represents a legal OpenTSDB rune with the exception of '_', that
// byte is directly copied to the resulting JSON byte slice.
//
// - If '_' is encountered, it is replaced by '__'.
//
// - If ':' is encountered, it is replaced by '_.'.
//
// - All other bytes are replaced by '_' followed by two bytes containing the
// uppercase ASCII representation of their hexadecimal value.
//
// This encoding allows saving arbitrary Go strings in OpenTSDB. That's
// required because Prometheus label values can contain anything, and even
// Prometheus metric names may (and often do) contain ':' (which is disallowed
// in OpenTSDB strings). The encoding uses '_' as an escape character and
// renders a ':' more or less recognizable as '_.'
//
// Examples:
//
// "foo-bar-42" -> "foo-bar-42"
//
// "foo_bar_42" -> "foo__bar__42"
//
// "http://example.org:8080" -> "http_.//example.org_.8080"
//
// "Björn's email: bjoern@soundcloud.com" ->
// "Bj_C3_B6rn_27s_20email_._20bjoern_40soundcloud.com"
//
// "日" -> "_E6_97_A5"
func (tv TagValue) MarshalJSON() ([]byte, error) {
length := len(tv)
// Need at least two more bytes than in tv.
result := bytes.NewBuffer(make([]byte, 0, length+2))
result.WriteByte('"')
for i := range length {
b := tv[i]
switch {
case (b >= '-' && b <= '9') || // '-', '.', '/', 0-9
(b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z'):
result.WriteByte(b)
case b == '_':
result.WriteString("__")
case b == ':':
result.WriteString("_.")
default:
fmt.Fprintf(result, "_%X", b)
}
}
result.WriteByte('"')
return result.Bytes(), nil
}
// UnmarshalJSON unmarshals JSON strings coming from OpenTSDB into Go strings
// by applying the inverse of what is described for the MarshalJSON method.
func (tv *TagValue) UnmarshalJSON(json []byte) error {
escapeLevel := 0 // How many bytes after '_'.
var parsedByte byte
// Might need fewer bytes, but let's avoid realloc.
result := bytes.NewBuffer(make([]byte, 0, len(json)-2))
for i, b := range json {
if i == 0 {
if b != '"' {
return fmt.Errorf("expected '\"', got %q", b)
}
continue
}
if i == len(json)-1 {
if b != '"' {
return fmt.Errorf("expected '\"', got %q", b)
}
break
}
switch escapeLevel {
case 0:
if b == '_' {
escapeLevel = 1
continue
}
result.WriteByte(b)
case 1:
switch {
case b == '_':
result.WriteByte('_')
escapeLevel = 0
case b == '.':
result.WriteByte(':')
escapeLevel = 0
case b >= '0' && b <= '9':
parsedByte = (b - 48) << 4
escapeLevel = 2
case b >= 'A' && b <= 'F': // A-F
parsedByte = (b - 55) << 4
escapeLevel = 2
default:
return fmt.Errorf(
"illegal escape sequence at byte %d (%c)",
i, b,
)
}
case 2:
switch {
case b >= '0' && b <= '9':
parsedByte += b - 48
case b >= 'A' && b <= 'F': // A-F
parsedByte += b - 55
default:
return fmt.Errorf(
"illegal escape sequence at byte %d (%c)",
i, b,
)
}
result.WriteByte(parsedByte)
escapeLevel = 0
default:
panic("unexpected escape level")
}
}
*tv = TagValue(result.String())
return nil
}
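// Round-trip sketch (illustrative, not part of the original file):
// UnmarshalJSON inverts MarshalJSON.
//
//	var tv TagValue
//	_ = tv.UnmarshalJSON([]byte(`"http_.//example.org_.8080"`))
//	// tv == TagValue("http://example.org:8080")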
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go | documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentsdb
import (
"encoding/json"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
var metric = model.Metric{
model.MetricNameLabel: "test:metric",
"testlabel": "test:value",
"many_chars": "abc!ABC:012-3!45ö67~89./",
}
func TestTagsFromMetric(t *testing.T) {
expected := map[string]TagValue{
"testlabel": TagValue("test:value"),
"many_chars": TagValue("abc!ABC:012-3!45ö67~89./"),
}
actual := tagsFromMetric(metric)
require.Equal(t, expected, actual)
}
func TestMarshalStoreSamplesRequest(t *testing.T) {
request := StoreSamplesRequest{
Metric: TagValue("test:metric"),
Timestamp: 4711,
Value: 3.1415,
Tags: tagsFromMetric(metric),
}
expectedJSON := `{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`
resultingJSON, err := json.Marshal(request)
require.NoError(t, err, "Marshal(request) resulted in err.")
require.JSONEq(t, expectedJSON, string(resultingJSON))
var unmarshaledRequest StoreSamplesRequest
err = json.Unmarshal([]byte(expectedJSON), &unmarshaledRequest)
require.NoError(t, err, "Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err.")
require.Equal(t, request, unmarshaledRequest)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go | documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentsdb
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
)
var stringtests = []struct {
tv TagValue
json []byte
}{
{TagValue("foo-bar-42"), []byte(`"foo-bar-42"`)},
{TagValue("foo_bar_42"), []byte(`"foo__bar__42"`)},
{TagValue("http://example.org:8080"), []byte(`"http_.//example.org_.8080"`)},
{TagValue("Björn's email: bjoern@soundcloud.com"), []byte(`"Bj_C3_B6rn_27s_20email_._20bjoern_40soundcloud.com"`)},
{TagValue("日"), []byte(`"_E6_97_A5"`)},
}
func TestTagValueMarshaling(t *testing.T) {
for i, tt := range stringtests {
got, err := json.Marshal(tt.tv)
require.NoError(t, err, "%d. Marshal(%q) returned error.", i, tt.tv)
require.Equal(t, tt.json, got, "%d. Marshal(%q) not equal.", i, tt.tv)
}
}
func TestTagValueUnMarshaling(t *testing.T) {
for i, tt := range stringtests {
var tv TagValue
err := json.Unmarshal(tt.json, &tv)
require.NoError(t, err, "%d. Unmarshal(%q, &str) returned error.", i, tt.json)
require.Equal(t, tt.tv, tv, "%d. Unmarshal(%q, &str) not equal.", i, tt.json)
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/schema/labels.go | schema/labels.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
// IsMetadataLabel returns true if the given label name is a special
// schema Metadata label.
func IsMetadataLabel(name string) bool {
return name == model.MetricNameLabel || name == model.MetricTypeLabel || name == model.MetricUnitLabel
}
// Metadata represents the core metric schema/metadata elements that:
// * are describing and identifying the metric schema/shape (e.g. name, type and unit).
// * are contributing to the general metric/series identity.
// * with the type-and-unit feature, are stored as Prometheus labels.
//
// Historically, similar information was encoded in the labels.MetricName (suffixes)
// and in the separate metadata.Metadata structures. However, with the
// type-and-unit-label feature (PROM-39), this information can now be stored directly
// in the special schema metadata labels, which offers better reliability (e.g. atomicity),
// compatibility and, in many cases, efficiency.
//
// NOTE: Metadata in the current form is generally similar (yet different) to:
// - The MetricFamily definition in OpenMetrics (https://prometheus.io/docs/specs/om/open_metrics_spec/#metricfamily).
// However, there is a small and important distinction around the metric name semantics
// for the "classic" representation of complex metrics like histograms. The
// Metadata.Name follows the __name__ semantics. See Name for details.
// - Original metadata.Metadata entries. However, not all fields in that metadata
// are "identifiable", notably the help field, plus metadata does not contain Name.
type Metadata struct {
// Name represents the final metric name for a Prometheus series.
// NOTE(bwplotka): Prometheus scrape formats (e.g. OpenMetrics) define
// the "metric family name". The Metadata.Name (so __name__ label) is not
// always the same as the MetricFamily.Name e.g.:
// * OpenMetrics metric family name on scrape: "acme_http_router_request_seconds"
// * Resulting Prometheus metric name: "acme_http_router_request_seconds_sum"
//
// Empty string means a nameless metric (e.g. the result of a PromQL function).
Name string
// Type represents the metric type. Empty value ("") is equivalent to
// model.UnknownMetricType.
Type model.MetricType
// Unit represents the metric unit. Empty string means a unitless metric (e.g.
// the result of a PromQL function).
//
// NOTE: Currently the unit value is not strictly defined beyond the OpenMetrics
// recommendations: https://prometheus.io/docs/specs/om/open_metrics_spec/#units-and-base-units
// TODO(bwplotka): Consider a stricter validation and rules e.g. lowercase only or UCUM standard.
// Read more in https://github.com/prometheus/proposals/blob/main/proposals/2024-09-25_metadata-labels.md#more-strict-unit-and-type-value-definition
Unit string
}
// NewMetadataFromLabels returns the schema metadata from the labels.
func NewMetadataFromLabels(ls labels.Labels) Metadata {
typ := model.MetricTypeUnknown
if got := ls.Get(model.MetricTypeLabel); got != "" {
typ = model.MetricType(got)
}
return Metadata{
Name: ls.Get(model.MetricNameLabel),
Type: typ,
Unit: ls.Get(model.MetricUnitLabel),
}
}
// IsTypeEmpty returns true if the metric type is empty (not set).
func (m Metadata) IsTypeEmpty() bool {
return m.Type == "" || m.Type == model.MetricTypeUnknown
}
// IsEmptyFor returns true if the Metadata field represented by the given labelName
// is empty (not set). If the labelName does not represent any Metadata field,
// IsEmptyFor returns true.
func (m Metadata) IsEmptyFor(labelName string) bool {
switch labelName {
case model.MetricNameLabel:
return m.Name == ""
case model.MetricTypeLabel:
return m.IsTypeEmpty()
case model.MetricUnitLabel:
return m.Unit == ""
default:
return true
}
}
// AddToLabels adds metric schema metadata as labels into the labels.ScratchBuilder.
// Empty Metadata fields will be ignored (not added).
func (m Metadata) AddToLabels(b *labels.ScratchBuilder) {
if m.Name != "" {
b.Add(model.MetricNameLabel, m.Name)
}
if !m.IsTypeEmpty() {
b.Add(model.MetricTypeLabel, string(m.Type))
}
if m.Unit != "" {
b.Add(model.MetricUnitLabel, m.Unit)
}
}
// SetToLabels injects metric schema metadata as labels into the labels.Builder.
// It follows the labels.Builder.Set semantics, so empty Metadata fields will
// remove the corresponding existing labels if they were previously set.
func (m Metadata) SetToLabels(b *labels.Builder) {
b.Set(model.MetricNameLabel, m.Name)
if m.Type == model.MetricTypeUnknown {
// Unknown equals empty semantically, so remove the label on unknown too as per
// method signature comment.
b.Set(model.MetricTypeLabel, "")
} else {
b.Set(model.MetricTypeLabel, string(m.Type))
}
b.Set(model.MetricUnitLabel, m.Unit)
}
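// Usage sketch (illustrative, not part of the original file): attaching
// schema metadata to a label set under construction.
//
//	b := labels.NewScratchBuilder(4)
//	b.Add("job", "api")
//	Metadata{Name: "http_requests_total", Type: model.MetricTypeCounter}.AddToLabels(&b)
//	b.Sort()
//	// b.Labels() now includes model.MetricNameLabel and model.MetricTypeLabel entries.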
// NewIgnoreOverriddenMetadataLabelScratchBuilder creates IgnoreOverriddenMetadataLabelScratchBuilder.
func (m Metadata) NewIgnoreOverriddenMetadataLabelScratchBuilder(b *labels.ScratchBuilder) *IgnoreOverriddenMetadataLabelScratchBuilder {
return &IgnoreOverriddenMetadataLabelScratchBuilder{ScratchBuilder: b, overwrite: m}
}
// IgnoreOverriddenMetadataLabelScratchBuilder is a wrapper over labels.ScratchBuilder
// that ignores label additions that would collide with non-empty Overwrite Metadata fields.
type IgnoreOverriddenMetadataLabelScratchBuilder struct {
*labels.ScratchBuilder
overwrite Metadata
}
// Add a name/value pair, unless it would collide with the non-empty Overwrite Metadata
// field. Note if you Add the same name twice you will get a duplicate label, which is invalid.
func (b IgnoreOverriddenMetadataLabelScratchBuilder) Add(name, value string) {
if !b.overwrite.IsEmptyFor(name) {
return
}
b.ScratchBuilder.Add(name, value)
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/schema/labels_test.go | schema/labels_test.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"fmt"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMetadata(t *testing.T) {
testMeta := Metadata{
Name: "metric_total",
Type: model.MetricTypeCounter,
Unit: "seconds",
}
for _, tcase := range []struct {
emptyName, emptyType, emptyUnit bool
}{
{},
{emptyName: true},
{emptyType: true},
{emptyUnit: true},
{emptyName: true, emptyType: true, emptyUnit: true},
} {
var (
expectedMeta Metadata
expectedLabels labels.Labels
)
{
// Setup expectations.
lb := labels.NewScratchBuilder(0)
lb.Add("foo", "bar")
if !tcase.emptyName {
lb.Add(model.MetricNameLabel, testMeta.Name)
expectedMeta.Name = testMeta.Name
}
if !tcase.emptyType {
lb.Add(model.MetricTypeLabel, string(testMeta.Type))
expectedMeta.Type = testMeta.Type
} else {
expectedMeta.Type = model.MetricTypeUnknown
}
if !tcase.emptyUnit {
lb.Add(model.MetricUnitLabel, testMeta.Unit)
expectedMeta.Unit = testMeta.Unit
}
lb.Sort()
expectedLabels = lb.Labels()
}
t.Run(fmt.Sprintf("meta=%#v", expectedMeta), func(t *testing.T) {
{
// From labels to Metadata.
got := NewMetadataFromLabels(expectedLabels)
require.Equal(t, expectedMeta, got)
}
{
// Empty methods.
require.Equal(t, tcase.emptyName, expectedMeta.IsEmptyFor(model.MetricNameLabel))
require.Equal(t, tcase.emptyType, expectedMeta.IsEmptyFor(model.MetricTypeLabel))
require.Equal(t, tcase.emptyType, expectedMeta.IsTypeEmpty())
require.Equal(t, tcase.emptyUnit, expectedMeta.IsEmptyFor(model.MetricUnitLabel))
}
{
// From Metadata to labels for various builders.
slb := labels.NewScratchBuilder(0)
slb.Add("foo", "bar")
expectedMeta.AddToLabels(&slb)
slb.Sort()
testutil.RequireEqual(t, expectedLabels, slb.Labels())
lb := labels.NewBuilder(labels.FromStrings("foo", "bar"))
expectedMeta.SetToLabels(lb)
testutil.RequireEqual(t, expectedLabels, lb.Labels())
}
})
}
}
func TestIgnoreOverriddenMetadataLabelsScratchBuilder(t *testing.T) {
// PROM-39 specifies that metadata labels should be sourced primarily from the metadata structures.
// However, the original labels should be preserved IF the metadata structure does not set or support certain information.
// Test those cases with common label interactions.
incomingLabels := labels.FromStrings(model.MetricNameLabel, "different_name", model.MetricTypeLabel, string(model.MetricTypeSummary), model.MetricUnitLabel, "MB", "foo", "bar")
for _, tcase := range []struct {
highPrioMeta Metadata
expectedLabels labels.Labels
}{
{
expectedLabels: incomingLabels,
},
{
highPrioMeta: Metadata{
Name: "metric_total",
Type: model.MetricTypeCounter,
Unit: "seconds",
},
expectedLabels: labels.FromStrings(model.MetricNameLabel, "metric_total", model.MetricTypeLabel, string(model.MetricTypeCounter), model.MetricUnitLabel, "seconds", "foo", "bar"),
},
{
highPrioMeta: Metadata{
Name: "metric_total",
Type: model.MetricTypeCounter,
},
expectedLabels: labels.FromStrings(model.MetricNameLabel, "metric_total", model.MetricTypeLabel, string(model.MetricTypeCounter), model.MetricUnitLabel, "MB", "foo", "bar"),
},
{
highPrioMeta: Metadata{
Type: model.MetricTypeCounter,
Unit: "seconds",
},
expectedLabels: labels.FromStrings(model.MetricNameLabel, "different_name", model.MetricTypeLabel, string(model.MetricTypeCounter), model.MetricUnitLabel, "seconds", "foo", "bar"),
},
{
highPrioMeta: Metadata{
Name: "metric_total",
Type: model.MetricTypeUnknown,
Unit: "seconds",
},
expectedLabels: labels.FromStrings(model.MetricNameLabel, "metric_total", model.MetricTypeLabel, string(model.MetricTypeSummary), model.MetricUnitLabel, "seconds", "foo", "bar"),
},
} {
t.Run(fmt.Sprintf("meta=%#v", tcase.highPrioMeta), func(t *testing.T) {
lb := labels.NewScratchBuilder(0)
tcase.highPrioMeta.AddToLabels(&lb)
wrapped := tcase.highPrioMeta.NewIgnoreOverriddenMetadataLabelScratchBuilder(&lb)
incomingLabels.Range(func(l labels.Label) {
wrapped.Add(l.Name, l.Value)
})
lb.Sort()
require.Equal(t, tcase.expectedLabels, lb.Labels())
})
}
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |
prometheus/prometheus | https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/promql/fuzz.go | promql/fuzz.go | // Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Only build when go-fuzz is in use
//go:build gofuzz
package promql
import (
"errors"
"io"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
"github.com/prometheus/prometheus/promql/parser"
)
// PromQL parser fuzzing instrumentation for use with
// https://github.com/dvyukov/go-fuzz.
//
// Fuzz each parser by building an appropriately instrumented parser, e.g.
// FuzzParseMetric, and executing it with:
//
// go-fuzz-build -func FuzzParseMetric -o FuzzParseMetric.zip github.com/prometheus/prometheus/promql
//
// And then run the tests with the appropriate inputs
//
// go-fuzz -bin FuzzParseMetric.zip -workdir fuzz-data/ParseMetric
//
// Further input samples should go in the folders fuzz-data/ParseMetric/corpus.
//
// Repeat for FuzzParseOpenMetric, FuzzParseMetricSelector and FuzzParseExpr.
// Tuning which value is returned from Fuzz*-functions has a strong influence
// on how quickly the fuzzer converges on "interesting" cases. At least try
// switching between fuzzMeh (= included in corpus, but not a priority) and
// fuzzDiscard (=don't use this input for re-building later inputs) when
// experimenting.
const (
fuzzInteresting = 1
fuzzMeh = 0
fuzzDiscard = -1
// Input size above which we know that Prometheus would consume too much
// memory. The recommended way to deal with it is to check the input size.
// https://google.github.io/oss-fuzz/getting-started/new-project-guide/#input-size
maxInputSize = 10240
)
// Use package-scope symbol table to avoid memory allocation on every fuzzing operation.
var symbolTable = labels.NewSymbolTable()
func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType, symbolTable, textparse.ParserOptions{})
if p == nil || warning != nil {
// An invalid content type is being passed, which should not happen
// in this context.
panic(warning)
}
var err error
for {
_, err = p.Next()
if err != nil {
break
}
}
if errors.Is(err, io.EOF) {
err = nil
}
if err == nil {
return fuzzInteresting
}
return fuzzMeh
}
// Fuzz the metric parser.
//
// Note that this is not the parser for the text-based exposition-format; that
// lives in github.com/prometheus/client_golang/text.
func FuzzParseMetric(in []byte) int {
return fuzzParseMetricWithContentType(in, "text/plain")
}
func FuzzParseOpenMetric(in []byte) int {
return fuzzParseMetricWithContentType(in, "application/openmetrics-text")
}
// Fuzz the metric selector parser.
func FuzzParseMetricSelector(in []byte) int {
if len(in) > maxInputSize {
return fuzzMeh
}
_, err := parser.ParseMetricSelector(string(in))
if err == nil {
return fuzzInteresting
}
return fuzzMeh
}
// Fuzz the expression parser.
func FuzzParseExpr(in []byte) int {
if len(in) > maxInputSize {
return fuzzMeh
}
_, err := parser.ParseExpr(string(in))
if err == nil {
return fuzzInteresting
}
return fuzzMeh
}
| go | Apache-2.0 | 66bdc88013e6c6098da7026ce828d3b33235d527 | 2026-01-07T08:35:43.488477Z | false |