| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
|---|---|---|---|---|---|---|---|---|---|---|---|
prometheus/tsdb
|
wal.go
|
truncate
|
func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error {
level.Error(w.logger).Log("msg", "WAL corruption detected; truncating",
"err", err, "file", w.files[file].Name(), "pos", lastOffset)
// Close and delete all files after the current one.
for _, f := range w.files[file+1:] {
if err := f.Close(); err != nil {
return err
}
if err := os.Remove(f.Name()); err != nil {
return err
}
}
w.mtx.Lock()
defer w.mtx.Unlock()
w.files = w.files[:file+1]
// Seek the current file to the last valid offset where we continue writing from.
_, err = w.files[file].Seek(lastOffset, io.SeekStart)
return err
}
|
go
|
func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error {
level.Error(w.logger).Log("msg", "WAL corruption detected; truncating",
"err", err, "file", w.files[file].Name(), "pos", lastOffset)
// Close and delete all files after the current one.
for _, f := range w.files[file+1:] {
if err := f.Close(); err != nil {
return err
}
if err := os.Remove(f.Name()); err != nil {
return err
}
}
w.mtx.Lock()
defer w.mtx.Unlock()
w.files = w.files[:file+1]
// Seek the current file to the last valid offset where we continue writing from.
_, err = w.files[file].Seek(lastOffset, io.SeekStart)
return err
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"truncate",
"(",
"err",
"error",
",",
"file",
"int",
",",
"lastOffset",
"int64",
")",
"error",
"{",
"level",
".",
"Error",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"err",
",",
"\"",
"\"",
",",
"w",
".",
"files",
"[",
"file",
"]",
".",
"Name",
"(",
")",
",",
"\"",
"\"",
",",
"lastOffset",
")",
"\n\n",
"// Close and delete all files after the current one.",
"for",
"_",
",",
"f",
":=",
"range",
"w",
".",
"files",
"[",
"file",
"+",
"1",
":",
"]",
"{",
"if",
"err",
":=",
"f",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"Remove",
"(",
"f",
".",
"Name",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"w",
".",
"files",
"=",
"w",
".",
"files",
"[",
":",
"file",
"+",
"1",
"]",
"\n\n",
"// Seek the current file to the last valid offset where we continue writing from.",
"_",
",",
"err",
"=",
"w",
".",
"files",
"[",
"file",
"]",
".",
"Seek",
"(",
"lastOffset",
",",
"io",
".",
"SeekStart",
")",
"\n",
"return",
"err",
"\n",
"}"
] |
// truncate the WAL after the last valid entry.
|
[
"truncate",
"the",
"WAL",
"after",
"the",
"last",
"valid",
"entry",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L259-L280
|
train
|
prometheus/tsdb
|
wal.go
|
Reader
|
func (w *SegmentWAL) Reader() WALReader {
return &repairingWALReader{
wal: w,
r: newWALReader(w.files, w.logger),
}
}
|
go
|
func (w *SegmentWAL) Reader() WALReader {
return &repairingWALReader{
wal: w,
r: newWALReader(w.files, w.logger),
}
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"Reader",
"(",
")",
"WALReader",
"{",
"return",
"&",
"repairingWALReader",
"{",
"wal",
":",
"w",
",",
"r",
":",
"newWALReader",
"(",
"w",
".",
"files",
",",
"w",
".",
"logger",
")",
",",
"}",
"\n",
"}"
] |
// Reader returns a new reader over the write ahead log data.
// It must be completely consumed before writing to the WAL.
|
[
"Reader",
"returns",
"a",
"new",
"reader",
"over",
"the",
"the",
"write",
"ahead",
"log",
"data",
".",
"It",
"must",
"be",
"completely",
"consumed",
"before",
"writing",
"to",
"the",
"WAL",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L284-L289
|
train
|
prometheus/tsdb
|
wal.go
|
LogSeries
|
func (w *SegmentWAL) LogSeries(series []RefSeries) error {
buf := w.getBuffer()
flag := w.encodeSeries(buf, series)
w.mtx.Lock()
defer w.mtx.Unlock()
err := w.write(WALEntrySeries, flag, buf.Get())
w.putBuffer(buf)
if err != nil {
return errors.Wrap(err, "log series")
}
tf := w.head()
for _, s := range series {
if tf.minSeries > s.Ref {
tf.minSeries = s.Ref
}
}
return nil
}
|
go
|
func (w *SegmentWAL) LogSeries(series []RefSeries) error {
buf := w.getBuffer()
flag := w.encodeSeries(buf, series)
w.mtx.Lock()
defer w.mtx.Unlock()
err := w.write(WALEntrySeries, flag, buf.Get())
w.putBuffer(buf)
if err != nil {
return errors.Wrap(err, "log series")
}
tf := w.head()
for _, s := range series {
if tf.minSeries > s.Ref {
tf.minSeries = s.Ref
}
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"LogSeries",
"(",
"series",
"[",
"]",
"RefSeries",
")",
"error",
"{",
"buf",
":=",
"w",
".",
"getBuffer",
"(",
")",
"\n\n",
"flag",
":=",
"w",
".",
"encodeSeries",
"(",
"buf",
",",
"series",
")",
"\n\n",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"err",
":=",
"w",
".",
"write",
"(",
"WALEntrySeries",
",",
"flag",
",",
"buf",
".",
"Get",
"(",
")",
")",
"\n\n",
"w",
".",
"putBuffer",
"(",
"buf",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"tf",
":=",
"w",
".",
"head",
"(",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"series",
"{",
"if",
"tf",
".",
"minSeries",
">",
"s",
".",
"Ref",
"{",
"tf",
".",
"minSeries",
"=",
"s",
".",
"Ref",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// LogSeries writes a batch of new series labels to the log.
// The series have to be ordered.
|
[
"LogSeries",
"writes",
"a",
"batch",
"of",
"new",
"series",
"labels",
"to",
"the",
"log",
".",
"The",
"series",
"have",
"to",
"be",
"ordered",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L429-L453
|
train
|
prometheus/tsdb
|
wal.go
|
LogSamples
|
func (w *SegmentWAL) LogSamples(samples []RefSample) error {
buf := w.getBuffer()
flag := w.encodeSamples(buf, samples)
w.mtx.Lock()
defer w.mtx.Unlock()
err := w.write(WALEntrySamples, flag, buf.Get())
w.putBuffer(buf)
if err != nil {
		return errors.Wrap(err, "log samples")
}
tf := w.head()
for _, s := range samples {
if tf.maxTime < s.T {
tf.maxTime = s.T
}
}
return nil
}
|
go
|
func (w *SegmentWAL) LogSamples(samples []RefSample) error {
buf := w.getBuffer()
flag := w.encodeSamples(buf, samples)
w.mtx.Lock()
defer w.mtx.Unlock()
err := w.write(WALEntrySamples, flag, buf.Get())
w.putBuffer(buf)
if err != nil {
		return errors.Wrap(err, "log samples")
}
tf := w.head()
for _, s := range samples {
if tf.maxTime < s.T {
tf.maxTime = s.T
}
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"LogSamples",
"(",
"samples",
"[",
"]",
"RefSample",
")",
"error",
"{",
"buf",
":=",
"w",
".",
"getBuffer",
"(",
")",
"\n\n",
"flag",
":=",
"w",
".",
"encodeSamples",
"(",
"buf",
",",
"samples",
")",
"\n\n",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"err",
":=",
"w",
".",
"write",
"(",
"WALEntrySamples",
",",
"flag",
",",
"buf",
".",
"Get",
"(",
")",
")",
"\n\n",
"w",
".",
"putBuffer",
"(",
"buf",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"tf",
":=",
"w",
".",
"head",
"(",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"samples",
"{",
"if",
"tf",
".",
"maxTime",
"<",
"s",
".",
"T",
"{",
"tf",
".",
"maxTime",
"=",
"s",
".",
"T",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// LogSamples writes a batch of new samples to the log.
|
[
"LogSamples",
"writes",
"a",
"batch",
"of",
"new",
"samples",
"to",
"the",
"log",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L456-L479
|
train
|
prometheus/tsdb
|
wal.go
|
LogDeletes
|
func (w *SegmentWAL) LogDeletes(stones []Stone) error {
buf := w.getBuffer()
flag := w.encodeDeletes(buf, stones)
w.mtx.Lock()
defer w.mtx.Unlock()
err := w.write(WALEntryDeletes, flag, buf.Get())
w.putBuffer(buf)
if err != nil {
		return errors.Wrap(err, "log deletes")
}
tf := w.head()
for _, s := range stones {
for _, iv := range s.intervals {
if tf.maxTime < iv.Maxt {
tf.maxTime = iv.Maxt
}
}
}
return nil
}
|
go
|
func (w *SegmentWAL) LogDeletes(stones []Stone) error {
buf := w.getBuffer()
flag := w.encodeDeletes(buf, stones)
w.mtx.Lock()
defer w.mtx.Unlock()
err := w.write(WALEntryDeletes, flag, buf.Get())
w.putBuffer(buf)
if err != nil {
		return errors.Wrap(err, "log deletes")
}
tf := w.head()
for _, s := range stones {
for _, iv := range s.intervals {
if tf.maxTime < iv.Maxt {
tf.maxTime = iv.Maxt
}
}
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"LogDeletes",
"(",
"stones",
"[",
"]",
"Stone",
")",
"error",
"{",
"buf",
":=",
"w",
".",
"getBuffer",
"(",
")",
"\n\n",
"flag",
":=",
"w",
".",
"encodeDeletes",
"(",
"buf",
",",
"stones",
")",
"\n\n",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"err",
":=",
"w",
".",
"write",
"(",
"WALEntryDeletes",
",",
"flag",
",",
"buf",
".",
"Get",
"(",
")",
")",
"\n\n",
"w",
".",
"putBuffer",
"(",
"buf",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"tf",
":=",
"w",
".",
"head",
"(",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"stones",
"{",
"for",
"_",
",",
"iv",
":=",
"range",
"s",
".",
"intervals",
"{",
"if",
"tf",
".",
"maxTime",
"<",
"iv",
".",
"Maxt",
"{",
"tf",
".",
"maxTime",
"=",
"iv",
".",
"Maxt",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// LogDeletes writes a batch of new deletes to the log.
|
[
"LogDeletes",
"write",
"a",
"batch",
"of",
"new",
"deletes",
"to",
"the",
"log",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L482-L507
|
train
|
prometheus/tsdb
|
wal.go
|
openSegmentFile
|
func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
// We must open all files in read/write mode as we may have to truncate along
// the way and any file may become the head.
f, err := os.OpenFile(name, os.O_RDWR, 0666)
if err != nil {
return nil, err
}
metab := make([]byte, 8)
	// If there is an error, we need to close f on Windows before GC.
// Otherwise, file op may fail.
hasError := true
defer func() {
if hasError {
f.Close()
}
}()
if n, err := f.Read(metab); err != nil {
return nil, errors.Wrapf(err, "validate meta %q", f.Name())
} else if n != 8 {
return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
}
if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {
return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name())
}
if metab[4] != WALFormatDefault {
return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
}
hasError = false
return f, nil
}
|
go
|
func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
// We must open all files in read/write mode as we may have to truncate along
// the way and any file may become the head.
f, err := os.OpenFile(name, os.O_RDWR, 0666)
if err != nil {
return nil, err
}
metab := make([]byte, 8)
	// If there is an error, we need to close f on Windows before GC.
// Otherwise, file op may fail.
hasError := true
defer func() {
if hasError {
f.Close()
}
}()
if n, err := f.Read(metab); err != nil {
return nil, errors.Wrapf(err, "validate meta %q", f.Name())
} else if n != 8 {
return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
}
if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {
return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name())
}
if metab[4] != WALFormatDefault {
return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
}
hasError = false
return f, nil
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"openSegmentFile",
"(",
"name",
"string",
")",
"(",
"*",
"os",
".",
"File",
",",
"error",
")",
"{",
"// We must open all files in read/write mode as we may have to truncate along",
"// the way and any file may become the head.",
"f",
",",
"err",
":=",
"os",
".",
"OpenFile",
"(",
"name",
",",
"os",
".",
"O_RDWR",
",",
"0666",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"metab",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"8",
")",
"\n\n",
"// If there is an error, we need close f for platform windows before gc.",
"// Otherwise, file op may fail.",
"hasError",
":=",
"true",
"\n",
"defer",
"func",
"(",
")",
"{",
"if",
"hasError",
"{",
"f",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"if",
"n",
",",
"err",
":=",
"f",
".",
"Read",
"(",
"metab",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"f",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"else",
"if",
"n",
"!=",
"8",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"n",
",",
"f",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n\n",
"if",
"m",
":=",
"binary",
".",
"BigEndian",
".",
"Uint32",
"(",
"metab",
"[",
":",
"4",
"]",
")",
";",
"m",
"!=",
"WALMagic",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"m",
",",
"f",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"if",
"metab",
"[",
"4",
"]",
"!=",
"WALFormatDefault",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"metab",
"[",
"4",
"]",
",",
"f",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"hasError",
"=",
"false",
"\n",
"return",
"f",
",",
"nil",
"\n",
"}"
] |
// openSegmentFile opens the given segment file and consumes and validates header.
|
[
"openSegmentFile",
"opens",
"the",
"given",
"segment",
"file",
"and",
"consumes",
"and",
"validates",
"header",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L510-L542
|
train
|
prometheus/tsdb
|
wal.go
|
createSegmentFile
|
func (w *SegmentWAL) createSegmentFile(name string) (*os.File, error) {
f, err := os.Create(name)
if err != nil {
return nil, err
}
if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
return nil, err
}
// Write header metadata for new file.
metab := make([]byte, 8)
binary.BigEndian.PutUint32(metab[:4], WALMagic)
metab[4] = WALFormatDefault
if _, err := f.Write(metab); err != nil {
return nil, err
}
return f, err
}
|
go
|
func (w *SegmentWAL) createSegmentFile(name string) (*os.File, error) {
f, err := os.Create(name)
if err != nil {
return nil, err
}
if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
return nil, err
}
// Write header metadata for new file.
metab := make([]byte, 8)
binary.BigEndian.PutUint32(metab[:4], WALMagic)
metab[4] = WALFormatDefault
if _, err := f.Write(metab); err != nil {
return nil, err
}
return f, err
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"createSegmentFile",
"(",
"name",
"string",
")",
"(",
"*",
"os",
".",
"File",
",",
"error",
")",
"{",
"f",
",",
"err",
":=",
"os",
".",
"Create",
"(",
"name",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"err",
"=",
"fileutil",
".",
"Preallocate",
"(",
"f",
",",
"w",
".",
"segmentSize",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"// Write header metadata for new file.",
"metab",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"8",
")",
"\n",
"binary",
".",
"BigEndian",
".",
"PutUint32",
"(",
"metab",
"[",
":",
"4",
"]",
",",
"WALMagic",
")",
"\n",
"metab",
"[",
"4",
"]",
"=",
"WALFormatDefault",
"\n\n",
"if",
"_",
",",
"err",
":=",
"f",
".",
"Write",
"(",
"metab",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"f",
",",
"err",
"\n",
"}"
] |
// createSegmentFile creates a new segment file with the given name. It preallocates
// the standard segment size if possible and writes the header.
|
[
"createSegmentFile",
"creates",
"a",
"new",
"segment",
"file",
"with",
"the",
"given",
"name",
".",
"It",
"preallocates",
"the",
"standard",
"segment",
"size",
"if",
"possible",
"and",
"writes",
"the",
"header",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L546-L563
|
train
|
prometheus/tsdb
|
wal.go
|
cut
|
func (w *SegmentWAL) cut() error {
// Sync current head to disk and close.
if hf := w.head(); hf != nil {
if err := w.flush(); err != nil {
return err
}
// Finish last segment asynchronously to not block the WAL moving along
// in the new segment.
go func() {
w.actorc <- func() error {
off, err := hf.Seek(0, io.SeekCurrent)
if err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
if err := hf.Truncate(off); err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
if err := hf.Sync(); err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
if err := hf.Close(); err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
return nil
}
}()
}
p, _, err := nextSequenceFile(w.dirFile.Name())
if err != nil {
return err
}
f, err := w.createSegmentFile(p)
if err != nil {
return err
}
go func() {
w.actorc <- func() error {
return errors.Wrap(w.dirFile.Sync(), "sync WAL directory")
}
}()
w.files = append(w.files, newSegmentFile(f))
// TODO(gouthamve): make the buffer size a constant.
w.cur = bufio.NewWriterSize(f, 8*1024*1024)
w.curN = 8
return nil
}
|
go
|
func (w *SegmentWAL) cut() error {
// Sync current head to disk and close.
if hf := w.head(); hf != nil {
if err := w.flush(); err != nil {
return err
}
// Finish last segment asynchronously to not block the WAL moving along
// in the new segment.
go func() {
w.actorc <- func() error {
off, err := hf.Seek(0, io.SeekCurrent)
if err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
if err := hf.Truncate(off); err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
if err := hf.Sync(); err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
if err := hf.Close(); err != nil {
return errors.Wrapf(err, "finish old segment %s", hf.Name())
}
return nil
}
}()
}
p, _, err := nextSequenceFile(w.dirFile.Name())
if err != nil {
return err
}
f, err := w.createSegmentFile(p)
if err != nil {
return err
}
go func() {
w.actorc <- func() error {
return errors.Wrap(w.dirFile.Sync(), "sync WAL directory")
}
}()
w.files = append(w.files, newSegmentFile(f))
// TODO(gouthamve): make the buffer size a constant.
w.cur = bufio.NewWriterSize(f, 8*1024*1024)
w.curN = 8
return nil
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"cut",
"(",
")",
"error",
"{",
"// Sync current head to disk and close.",
"if",
"hf",
":=",
"w",
".",
"head",
"(",
")",
";",
"hf",
"!=",
"nil",
"{",
"if",
"err",
":=",
"w",
".",
"flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// Finish last segment asynchronously to not block the WAL moving along",
"// in the new segment.",
"go",
"func",
"(",
")",
"{",
"w",
".",
"actorc",
"<-",
"func",
"(",
")",
"error",
"{",
"off",
",",
"err",
":=",
"hf",
".",
"Seek",
"(",
"0",
",",
"io",
".",
"SeekCurrent",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"hf",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"hf",
".",
"Truncate",
"(",
"off",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"hf",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"hf",
".",
"Sync",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"hf",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"hf",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"hf",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"}",
"\n\n",
"p",
",",
"_",
",",
"err",
":=",
"nextSequenceFile",
"(",
"w",
".",
"dirFile",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"f",
",",
"err",
":=",
"w",
".",
"createSegmentFile",
"(",
"p",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"go",
"func",
"(",
")",
"{",
"w",
".",
"actorc",
"<-",
"func",
"(",
")",
"error",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"w",
".",
"dirFile",
".",
"Sync",
"(",
")",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"w",
".",
"files",
"=",
"append",
"(",
"w",
".",
"files",
",",
"newSegmentFile",
"(",
"f",
")",
")",
"\n\n",
"// TODO(gouthamve): make the buffer size a constant.",
"w",
".",
"cur",
"=",
"bufio",
".",
"NewWriterSize",
"(",
"f",
",",
"8",
"*",
"1024",
"*",
"1024",
")",
"\n",
"w",
".",
"curN",
"=",
"8",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// cut finishes the currently active segments and opens the next one.
// The encoder is reset to point to the new segment.
|
[
"cut",
"finishes",
"the",
"currently",
"active",
"segments",
"and",
"opens",
"the",
"next",
"one",
".",
"The",
"encoder",
"is",
"reset",
"to",
"point",
"to",
"the",
"new",
"segment",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L567-L617
|
train
|
prometheus/tsdb
|
wal.go
|
Sync
|
func (w *SegmentWAL) Sync() error {
var head *segmentFile
var err error
// Flush the writer and retrieve the reference to the head segment under mutex lock.
func() {
w.mtx.Lock()
defer w.mtx.Unlock()
if err = w.flush(); err != nil {
return
}
head = w.head()
}()
if err != nil {
return errors.Wrap(err, "flush buffer")
}
if head != nil {
// But only fsync the head segment after releasing the mutex as it will block on disk I/O.
start := time.Now()
err := fileutil.Fdatasync(head.File)
w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
return err
}
return nil
}
|
go
|
func (w *SegmentWAL) Sync() error {
var head *segmentFile
var err error
// Flush the writer and retrieve the reference to the head segment under mutex lock.
func() {
w.mtx.Lock()
defer w.mtx.Unlock()
if err = w.flush(); err != nil {
return
}
head = w.head()
}()
if err != nil {
return errors.Wrap(err, "flush buffer")
}
if head != nil {
// But only fsync the head segment after releasing the mutex as it will block on disk I/O.
start := time.Now()
err := fileutil.Fdatasync(head.File)
w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
return err
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"Sync",
"(",
")",
"error",
"{",
"var",
"head",
"*",
"segmentFile",
"\n",
"var",
"err",
"error",
"\n\n",
"// Flush the writer and retrieve the reference to the head segment under mutex lock.",
"func",
"(",
")",
"{",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n",
"if",
"err",
"=",
"w",
".",
"flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"head",
"=",
"w",
".",
"head",
"(",
")",
"\n",
"}",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"head",
"!=",
"nil",
"{",
"// But only fsync the head segment after releasing the mutex as it will block on disk I/O.",
"start",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"err",
":=",
"fileutil",
".",
"Fdatasync",
"(",
"head",
".",
"File",
")",
"\n",
"w",
".",
"metrics",
".",
"fsyncDuration",
".",
"Observe",
"(",
"time",
".",
"Since",
"(",
"start",
")",
".",
"Seconds",
"(",
")",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Sync flushes the changes to disk.
|
[
"Sync",
"flushes",
"the",
"changes",
"to",
"disk",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L627-L651
|
train
|
prometheus/tsdb
|
wal.go
|
Close
|
func (w *SegmentWAL) Close() error {
// Make sure you can call Close() multiple times.
select {
case <-w.stopc:
return nil // Already closed.
default:
}
close(w.stopc)
<-w.donec
w.mtx.Lock()
defer w.mtx.Unlock()
if err := w.sync(); err != nil {
return err
}
// On opening, a WAL must be fully consumed once. Afterwards
// only the current segment will still be open.
if hf := w.head(); hf != nil {
if err := hf.Close(); err != nil {
return errors.Wrapf(err, "closing WAL head %s", hf.Name())
}
}
return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name())
}
|
go
|
func (w *SegmentWAL) Close() error {
// Make sure you can call Close() multiple times.
select {
case <-w.stopc:
return nil // Already closed.
default:
}
close(w.stopc)
<-w.donec
w.mtx.Lock()
defer w.mtx.Unlock()
if err := w.sync(); err != nil {
return err
}
// On opening, a WAL must be fully consumed once. Afterwards
// only the current segment will still be open.
if hf := w.head(); hf != nil {
if err := hf.Close(); err != nil {
return errors.Wrapf(err, "closing WAL head %s", hf.Name())
}
}
return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name())
}
|
[
"func",
"(",
"w",
"*",
"SegmentWAL",
")",
"Close",
"(",
")",
"error",
"{",
"// Make sure you can call Close() multiple times.",
"select",
"{",
"case",
"<-",
"w",
".",
"stopc",
":",
"return",
"nil",
"// Already closed.",
"\n",
"default",
":",
"}",
"\n\n",
"close",
"(",
"w",
".",
"stopc",
")",
"\n",
"<-",
"w",
".",
"donec",
"\n\n",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"err",
":=",
"w",
".",
"sync",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// On opening, a WAL must be fully consumed once. Afterwards",
"// only the current segment will still be open.",
"if",
"hf",
":=",
"w",
".",
"head",
"(",
")",
";",
"hf",
"!=",
"nil",
"{",
"if",
"err",
":=",
"hf",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"hf",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"errors",
".",
"Wrapf",
"(",
"w",
".",
"dirFile",
".",
"Close",
"(",
")",
",",
"\"",
"\"",
",",
"w",
".",
"dirFile",
".",
"Name",
"(",
")",
")",
"\n",
"}"
] |
// Close syncs all data and closes the underlying resources.
|
[
"Close",
"syncs",
"all",
"data",
"and",
"closes",
"the",
"underlying",
"resources",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L711-L737
|
train
|
prometheus/tsdb
|
wal.go
|
next
|
func (r *walReader) next() bool {
if r.cur >= len(r.files) {
return false
}
cf := r.files[r.cur]
// Remember the offset after the last correctly read entry. If the next one
// is corrupted, this is where we can safely truncate.
r.lastOffset, r.err = cf.Seek(0, io.SeekCurrent)
if r.err != nil {
return false
}
et, flag, b, err := r.entry(cf)
// If we reached the end of the reader, advance to the next one
// and close.
// Do not close on the last one as it will still be appended to.
if err == io.EOF {
if r.cur == len(r.files)-1 {
return false
}
// Current reader completed, close and move to the next one.
if err := cf.Close(); err != nil {
r.err = err
return false
}
r.cur++
return r.next()
}
if err != nil {
r.err = err
return false
}
r.curType = et
r.curFlag = flag
r.curBuf = b
return r.err == nil
}
|
go
|
func (r *walReader) next() bool {
if r.cur >= len(r.files) {
return false
}
cf := r.files[r.cur]
// Remember the offset after the last correctly read entry. If the next one
// is corrupted, this is where we can safely truncate.
r.lastOffset, r.err = cf.Seek(0, io.SeekCurrent)
if r.err != nil {
return false
}
et, flag, b, err := r.entry(cf)
// If we reached the end of the reader, advance to the next one
// and close.
// Do not close on the last one as it will still be appended to.
if err == io.EOF {
if r.cur == len(r.files)-1 {
return false
}
// Current reader completed, close and move to the next one.
if err := cf.Close(); err != nil {
r.err = err
return false
}
r.cur++
return r.next()
}
if err != nil {
r.err = err
return false
}
r.curType = et
r.curFlag = flag
r.curBuf = b
return r.err == nil
}
|
[
"func",
"(",
"r",
"*",
"walReader",
")",
"next",
"(",
")",
"bool",
"{",
"if",
"r",
".",
"cur",
">=",
"len",
"(",
"r",
".",
"files",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"cf",
":=",
"r",
".",
"files",
"[",
"r",
".",
"cur",
"]",
"\n\n",
"// Remember the offset after the last correctly read entry. If the next one",
"// is corrupted, this is where we can safely truncate.",
"r",
".",
"lastOffset",
",",
"r",
".",
"err",
"=",
"cf",
".",
"Seek",
"(",
"0",
",",
"io",
".",
"SeekCurrent",
")",
"\n",
"if",
"r",
".",
"err",
"!=",
"nil",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"et",
",",
"flag",
",",
"b",
",",
"err",
":=",
"r",
".",
"entry",
"(",
"cf",
")",
"\n",
"// If we reached the end of the reader, advance to the next one",
"// and close.",
"// Do not close on the last one as it will still be appended to.",
"if",
"err",
"==",
"io",
".",
"EOF",
"{",
"if",
"r",
".",
"cur",
"==",
"len",
"(",
"r",
".",
"files",
")",
"-",
"1",
"{",
"return",
"false",
"\n",
"}",
"\n",
"// Current reader completed, close and move to the next one.",
"if",
"err",
":=",
"cf",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"r",
".",
"cur",
"++",
"\n",
"return",
"r",
".",
"next",
"(",
")",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"err",
"=",
"err",
"\n",
"return",
"false",
"\n",
"}",
"\n\n",
"r",
".",
"curType",
"=",
"et",
"\n",
"r",
".",
"curFlag",
"=",
"flag",
"\n",
"r",
".",
"curBuf",
"=",
"b",
"\n",
"return",
"r",
".",
"err",
"==",
"nil",
"\n",
"}"
] |
// next decodes the next entry pair and returns true
// if it was successful.
|
[
"next",
"returns",
"decodes",
"the",
"next",
"entry",
"pair",
"and",
"returns",
"true",
"if",
"it",
"was",
"successful",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L1013-L1051
|
train
|
prometheus/tsdb
|
wal.go
|
MigrateWAL
|
func MigrateWAL(logger log.Logger, dir string) (err error) {
if logger == nil {
logger = log.NewNopLogger()
}
if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists {
return err
}
level.Info(logger).Log("msg", "migrating WAL format")
tmpdir := dir + ".tmp"
if err := os.RemoveAll(tmpdir); err != nil {
return errors.Wrap(err, "cleanup replacement dir")
}
repl, err := wal.New(logger, nil, tmpdir)
if err != nil {
return errors.Wrap(err, "open new WAL")
}
// It should've already been closed as part of the previous finalization.
// Do it once again in case of prior errors.
defer func() {
if err != nil {
repl.Close()
}
}()
w, err := OpenSegmentWAL(dir, logger, time.Minute, nil)
if err != nil {
return errors.Wrap(err, "open old WAL")
}
defer w.Close()
rdr := w.Reader()
var (
enc RecordEncoder
b []byte
)
decErr := rdr.Read(
func(s []RefSeries) {
if err != nil {
return
}
err = repl.Log(enc.Series(s, b[:0]))
},
func(s []RefSample) {
if err != nil {
return
}
err = repl.Log(enc.Samples(s, b[:0]))
},
func(s []Stone) {
if err != nil {
return
}
err = repl.Log(enc.Tombstones(s, b[:0]))
},
)
if decErr != nil {
		return errors.Wrap(decErr, "decode old entries")
}
if err != nil {
return errors.Wrap(err, "write new entries")
}
// We explicitly close even when there is a defer for Windows to be
	// able to delete it. The defer is in place to close it in case there
// are errors above.
if err := w.Close(); err != nil {
return errors.Wrap(err, "close old WAL")
}
if err := repl.Close(); err != nil {
return errors.Wrap(err, "close new WAL")
}
if err := fileutil.Replace(tmpdir, dir); err != nil {
return errors.Wrap(err, "replace old WAL")
}
return nil
}
|
go
|
func MigrateWAL(logger log.Logger, dir string) (err error) {
if logger == nil {
logger = log.NewNopLogger()
}
if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists {
return err
}
level.Info(logger).Log("msg", "migrating WAL format")
tmpdir := dir + ".tmp"
if err := os.RemoveAll(tmpdir); err != nil {
return errors.Wrap(err, "cleanup replacement dir")
}
repl, err := wal.New(logger, nil, tmpdir)
if err != nil {
return errors.Wrap(err, "open new WAL")
}
// It should've already been closed as part of the previous finalization.
// Do it once again in case of prior errors.
defer func() {
if err != nil {
repl.Close()
}
}()
w, err := OpenSegmentWAL(dir, logger, time.Minute, nil)
if err != nil {
return errors.Wrap(err, "open old WAL")
}
defer w.Close()
rdr := w.Reader()
var (
enc RecordEncoder
b []byte
)
decErr := rdr.Read(
func(s []RefSeries) {
if err != nil {
return
}
err = repl.Log(enc.Series(s, b[:0]))
},
func(s []RefSample) {
if err != nil {
return
}
err = repl.Log(enc.Samples(s, b[:0]))
},
func(s []Stone) {
if err != nil {
return
}
err = repl.Log(enc.Tombstones(s, b[:0]))
},
)
if decErr != nil {
		return errors.Wrap(decErr, "decode old entries")
}
if err != nil {
return errors.Wrap(err, "write new entries")
}
// We explicitly close even when there is a defer for Windows to be
	// able to delete it. The defer is in place to close it in case there
// are errors above.
if err := w.Close(); err != nil {
return errors.Wrap(err, "close old WAL")
}
if err := repl.Close(); err != nil {
return errors.Wrap(err, "close new WAL")
}
if err := fileutil.Replace(tmpdir, dir); err != nil {
return errors.Wrap(err, "replace old WAL")
}
return nil
}
|
[
"func",
"MigrateWAL",
"(",
"logger",
"log",
".",
"Logger",
",",
"dir",
"string",
")",
"(",
"err",
"error",
")",
"{",
"if",
"logger",
"==",
"nil",
"{",
"logger",
"=",
"log",
".",
"NewNopLogger",
"(",
")",
"\n",
"}",
"\n",
"if",
"exists",
",",
"err",
":=",
"deprecatedWALExists",
"(",
"logger",
",",
"dir",
")",
";",
"err",
"!=",
"nil",
"||",
"!",
"exists",
"{",
"return",
"err",
"\n",
"}",
"\n",
"level",
".",
"Info",
"(",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n\n",
"tmpdir",
":=",
"dir",
"+",
"\"",
"\"",
"\n",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"tmpdir",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"repl",
",",
"err",
":=",
"wal",
".",
"New",
"(",
"logger",
",",
"nil",
",",
"tmpdir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"// It should've already been closed as part of the previous finalization.",
"// Do it once again in case of prior errors.",
"defer",
"func",
"(",
")",
"{",
"if",
"err",
"!=",
"nil",
"{",
"repl",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"w",
",",
"err",
":=",
"OpenSegmentWAL",
"(",
"dir",
",",
"logger",
",",
"time",
".",
"Minute",
",",
"nil",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"defer",
"w",
".",
"Close",
"(",
")",
"\n\n",
"rdr",
":=",
"w",
".",
"Reader",
"(",
")",
"\n\n",
"var",
"(",
"enc",
"RecordEncoder",
"\n",
"b",
"[",
"]",
"byte",
"\n",
")",
"\n",
"decErr",
":=",
"rdr",
".",
"Read",
"(",
"func",
"(",
"s",
"[",
"]",
"RefSeries",
")",
"{",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"err",
"=",
"repl",
".",
"Log",
"(",
"enc",
".",
"Series",
"(",
"s",
",",
"b",
"[",
":",
"0",
"]",
")",
")",
"\n",
"}",
",",
"func",
"(",
"s",
"[",
"]",
"RefSample",
")",
"{",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"err",
"=",
"repl",
".",
"Log",
"(",
"enc",
".",
"Samples",
"(",
"s",
",",
"b",
"[",
":",
"0",
"]",
")",
")",
"\n",
"}",
",",
"func",
"(",
"s",
"[",
"]",
"Stone",
")",
"{",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"err",
"=",
"repl",
".",
"Log",
"(",
"enc",
".",
"Tombstones",
"(",
"s",
",",
"b",
"[",
":",
"0",
"]",
")",
")",
"\n",
"}",
",",
")",
"\n",
"if",
"decErr",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"// We explicitly close even when there is a defer for Windows to be",
"// able to delete it. The defer is in place to close it in-case there",
"// are errors above.",
"if",
"err",
":=",
"w",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"repl",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"fileutil",
".",
"Replace",
"(",
"tmpdir",
",",
"dir",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// MigrateWAL rewrites the deprecated write ahead log into the new format.
|
[
"MigrateWAL",
"rewrites",
"the",
"deprecated",
"write",
"ahead",
"log",
"into",
"the",
"new",
"format",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L1235-L1312
|
train
|
prometheus/tsdb
|
db.go
|
Appender
|
func (db *DB) Appender() Appender {
return dbAppender{db: db, Appender: db.head.Appender()}
}
|
go
|
func (db *DB) Appender() Appender {
return dbAppender{db: db, Appender: db.head.Appender()}
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"Appender",
"(",
")",
"Appender",
"{",
"return",
"dbAppender",
"{",
"db",
":",
"db",
",",
"Appender",
":",
"db",
".",
"head",
".",
"Appender",
"(",
")",
"}",
"\n",
"}"
] |
// Appender opens a new appender against the database.
|
[
"Appender",
"opens",
"a",
"new",
"appender",
"against",
"the",
"database",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L379-L381
|
train
|
prometheus/tsdb
|
db.go
|
compact
|
func (db *DB) compact() (err error) {
db.cmtx.Lock()
defer db.cmtx.Unlock()
// Check whether we have pending head blocks that are ready to be persisted.
// They have the highest priority.
for {
select {
case <-db.stopc:
return nil
default:
}
if !db.head.compactable() {
break
}
mint := db.head.MinTime()
maxt := rangeForTimestamp(mint, db.head.chunkRange)
// Wrap head into a range that bounds all reads to it.
head := &rangeHead{
head: db.head,
mint: mint,
// We remove 1 millisecond from maxt because block
// intervals are half-open: [b.MinTime, b.MaxTime). But
// chunk intervals are closed: [c.MinTime, c.MaxTime];
// so in order to make sure that overlaps are evaluated
// consistently, we explicitly remove the last value
// from the block interval here.
maxt: maxt - 1,
}
uid, err := db.compactor.Write(db.dir, head, mint, maxt, nil)
if err != nil {
return errors.Wrap(err, "persist head block")
}
runtime.GC()
if err := db.reload(); err != nil {
if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid)
}
return errors.Wrap(err, "reload blocks")
}
if (uid == ulid.ULID{}) {
// Compaction resulted in an empty block.
// Head truncating during db.reload() depends on the persisted blocks and
// in this case no new block will be persisted so manually truncate the head.
if err = db.head.Truncate(maxt); err != nil {
return errors.Wrap(err, "head truncate failed (in compact)")
}
}
runtime.GC()
}
// Check for compactions of multiple blocks.
for {
plan, err := db.compactor.Plan(db.dir)
if err != nil {
return errors.Wrap(err, "plan compaction")
}
if len(plan) == 0 {
break
}
select {
case <-db.stopc:
return nil
default:
}
uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
if err != nil {
return errors.Wrapf(err, "compact %s", plan)
}
runtime.GC()
if err := db.reload(); err != nil {
if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid)
}
return errors.Wrap(err, "reload blocks")
}
runtime.GC()
}
return nil
}
|
go
|
func (db *DB) compact() (err error) {
db.cmtx.Lock()
defer db.cmtx.Unlock()
// Check whether we have pending head blocks that are ready to be persisted.
// They have the highest priority.
for {
select {
case <-db.stopc:
return nil
default:
}
if !db.head.compactable() {
break
}
mint := db.head.MinTime()
maxt := rangeForTimestamp(mint, db.head.chunkRange)
// Wrap head into a range that bounds all reads to it.
head := &rangeHead{
head: db.head,
mint: mint,
// We remove 1 millisecond from maxt because block
// intervals are half-open: [b.MinTime, b.MaxTime). But
// chunk intervals are closed: [c.MinTime, c.MaxTime];
// so in order to make sure that overlaps are evaluated
// consistently, we explicitly remove the last value
// from the block interval here.
maxt: maxt - 1,
}
uid, err := db.compactor.Write(db.dir, head, mint, maxt, nil)
if err != nil {
return errors.Wrap(err, "persist head block")
}
runtime.GC()
if err := db.reload(); err != nil {
if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid)
}
return errors.Wrap(err, "reload blocks")
}
if (uid == ulid.ULID{}) {
// Compaction resulted in an empty block.
// Head truncating during db.reload() depends on the persisted blocks and
// in this case no new block will be persisted so manually truncate the head.
if err = db.head.Truncate(maxt); err != nil {
return errors.Wrap(err, "head truncate failed (in compact)")
}
}
runtime.GC()
}
// Check for compactions of multiple blocks.
for {
plan, err := db.compactor.Plan(db.dir)
if err != nil {
return errors.Wrap(err, "plan compaction")
}
if len(plan) == 0 {
break
}
select {
case <-db.stopc:
return nil
default:
}
uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
if err != nil {
return errors.Wrapf(err, "compact %s", plan)
}
runtime.GC()
if err := db.reload(); err != nil {
if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid)
}
return errors.Wrap(err, "reload blocks")
}
runtime.GC()
}
return nil
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"compact",
"(",
")",
"(",
"err",
"error",
")",
"{",
"db",
".",
"cmtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"db",
".",
"cmtx",
".",
"Unlock",
"(",
")",
"\n",
"// Check whether we have pending head blocks that are ready to be persisted.",
"// They have the highest priority.",
"for",
"{",
"select",
"{",
"case",
"<-",
"db",
".",
"stopc",
":",
"return",
"nil",
"\n",
"default",
":",
"}",
"\n",
"if",
"!",
"db",
".",
"head",
".",
"compactable",
"(",
")",
"{",
"break",
"\n",
"}",
"\n",
"mint",
":=",
"db",
".",
"head",
".",
"MinTime",
"(",
")",
"\n",
"maxt",
":=",
"rangeForTimestamp",
"(",
"mint",
",",
"db",
".",
"head",
".",
"chunkRange",
")",
"\n\n",
"// Wrap head into a range that bounds all reads to it.",
"head",
":=",
"&",
"rangeHead",
"{",
"head",
":",
"db",
".",
"head",
",",
"mint",
":",
"mint",
",",
"// We remove 1 millisecond from maxt because block",
"// intervals are half-open: [b.MinTime, b.MaxTime). But",
"// chunk intervals are closed: [c.MinTime, c.MaxTime];",
"// so in order to make sure that overlaps are evaluated",
"// consistently, we explicitly remove the last value",
"// from the block interval here.",
"maxt",
":",
"maxt",
"-",
"1",
",",
"}",
"\n",
"uid",
",",
"err",
":=",
"db",
".",
"compactor",
".",
"Write",
"(",
"db",
".",
"dir",
",",
"head",
",",
"mint",
",",
"maxt",
",",
"nil",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"runtime",
".",
"GC",
"(",
")",
"\n\n",
"if",
"err",
":=",
"db",
".",
"reload",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"filepath",
".",
"Join",
"(",
"db",
".",
"dir",
",",
"uid",
".",
"String",
"(",
")",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"uid",
")",
"\n",
"}",
"\n",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"(",
"uid",
"==",
"ulid",
".",
"ULID",
"{",
"}",
")",
"{",
"// Compaction resulted in an empty block.",
"// Head truncating during db.reload() depends on the persisted blocks and",
"// in this case no new block will be persisted so manually truncate the head.",
"if",
"err",
"=",
"db",
".",
"head",
".",
"Truncate",
"(",
"maxt",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"\n",
"runtime",
".",
"GC",
"(",
")",
"\n",
"}",
"\n\n",
"// Check for compactions of multiple blocks.",
"for",
"{",
"plan",
",",
"err",
":=",
"db",
".",
"compactor",
".",
"Plan",
"(",
"db",
".",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"plan",
")",
"==",
"0",
"{",
"break",
"\n",
"}",
"\n\n",
"select",
"{",
"case",
"<-",
"db",
".",
"stopc",
":",
"return",
"nil",
"\n",
"default",
":",
"}",
"\n\n",
"uid",
",",
"err",
":=",
"db",
".",
"compactor",
".",
"Compact",
"(",
"db",
".",
"dir",
",",
"plan",
",",
"db",
".",
"blocks",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"plan",
")",
"\n",
"}",
"\n",
"runtime",
".",
"GC",
"(",
")",
"\n\n",
"if",
"err",
":=",
"db",
".",
"reload",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"filepath",
".",
"Join",
"(",
"db",
".",
"dir",
",",
"uid",
".",
"String",
"(",
")",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"uid",
")",
"\n",
"}",
"\n",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"runtime",
".",
"GC",
"(",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// Compact data if possible. After successful compaction, blocks are reloaded,
// which will also trigger blocks to be deleted that fall out of the retention
// window.
// If no blocks are compacted, the retention window state doesn't change. Thus,
// this is sufficient to reliably delete old data.
// Old blocks are only deleted on reload based on the new block's parent information.
// See DB.reload documentation for further information.
|
[
"Compact",
"data",
"if",
"possible",
".",
"After",
"successful",
"compaction",
"blocks",
"are",
"reloaded",
"which",
"will",
"also",
"trigger",
"blocks",
"to",
"be",
"deleted",
"that",
"fall",
"out",
"of",
"the",
"retention",
"window",
".",
"If",
"no",
"blocks",
"are",
"compacted",
"the",
"retention",
"window",
"state",
"doesn",
"t",
"change",
".",
"Thus",
"this",
"is",
"sufficient",
"to",
"reliably",
"delete",
"old",
"data",
".",
"Old",
"blocks",
"are",
"only",
"deleted",
"on",
"reload",
"based",
"on",
"the",
"new",
"block",
"s",
"parent",
"information",
".",
"See",
"DB",
".",
"reload",
"documentation",
"for",
"further",
"information",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L411-L496
|
train
|
prometheus/tsdb
|
db.go
|
deletableBlocks
|
func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block {
deletable := make(map[ulid.ULID]*Block)
// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
// This ensures that the retentions will remove the oldest blocks.
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime
})
for _, block := range blocks {
if block.Meta().Compaction.Deletable {
deletable[block.Meta().ULID] = block
}
}
for ulid, block := range db.beyondTimeRetention(blocks) {
deletable[ulid] = block
}
for ulid, block := range db.beyondSizeRetention(blocks) {
deletable[ulid] = block
}
return deletable
}
|
go
|
func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block {
deletable := make(map[ulid.ULID]*Block)
// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
// This ensures that the retentions will remove the oldest blocks.
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime
})
for _, block := range blocks {
if block.Meta().Compaction.Deletable {
deletable[block.Meta().ULID] = block
}
}
for ulid, block := range db.beyondTimeRetention(blocks) {
deletable[ulid] = block
}
for ulid, block := range db.beyondSizeRetention(blocks) {
deletable[ulid] = block
}
return deletable
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"deletableBlocks",
"(",
"blocks",
"[",
"]",
"*",
"Block",
")",
"map",
"[",
"ulid",
".",
"ULID",
"]",
"*",
"Block",
"{",
"deletable",
":=",
"make",
"(",
"map",
"[",
"ulid",
".",
"ULID",
"]",
"*",
"Block",
")",
"\n\n",
"// Sort the blocks by time - newest to oldest (largest to smallest timestamp).",
"// This ensures that the retentions will remove the oldest blocks.",
"sort",
".",
"Slice",
"(",
"blocks",
",",
"func",
"(",
"i",
",",
"j",
"int",
")",
"bool",
"{",
"return",
"blocks",
"[",
"i",
"]",
".",
"Meta",
"(",
")",
".",
"MaxTime",
">",
"blocks",
"[",
"j",
"]",
".",
"Meta",
"(",
")",
".",
"MaxTime",
"\n",
"}",
")",
"\n\n",
"for",
"_",
",",
"block",
":=",
"range",
"blocks",
"{",
"if",
"block",
".",
"Meta",
"(",
")",
".",
"Compaction",
".",
"Deletable",
"{",
"deletable",
"[",
"block",
".",
"Meta",
"(",
")",
".",
"ULID",
"]",
"=",
"block",
"\n",
"}",
"\n",
"}",
"\n\n",
"for",
"ulid",
",",
"block",
":=",
"range",
"db",
".",
"beyondTimeRetention",
"(",
"blocks",
")",
"{",
"deletable",
"[",
"ulid",
"]",
"=",
"block",
"\n",
"}",
"\n\n",
"for",
"ulid",
",",
"block",
":=",
"range",
"db",
".",
"beyondSizeRetention",
"(",
"blocks",
")",
"{",
"deletable",
"[",
"ulid",
"]",
"=",
"block",
"\n",
"}",
"\n\n",
"return",
"deletable",
"\n",
"}"
] |
// deletableBlocks returns all blocks past retention policy.
|
[
"deletableBlocks",
"returns",
"all",
"blocks",
"past",
"retention",
"policy",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L634-L658
|
train
|
prometheus/tsdb
|
db.go
|
deleteBlocks
|
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
for ulid, block := range blocks {
if block != nil {
if err := block.Close(); err != nil {
level.Warn(db.logger).Log("msg", "closing block failed", "err", err)
}
}
if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil {
return errors.Wrapf(err, "delete obsolete block %s", ulid)
}
}
return nil
}
|
go
|
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
for ulid, block := range blocks {
if block != nil {
if err := block.Close(); err != nil {
level.Warn(db.logger).Log("msg", "closing block failed", "err", err)
}
}
if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil {
return errors.Wrapf(err, "delete obsolete block %s", ulid)
}
}
return nil
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"deleteBlocks",
"(",
"blocks",
"map",
"[",
"ulid",
".",
"ULID",
"]",
"*",
"Block",
")",
"error",
"{",
"for",
"ulid",
",",
"block",
":=",
"range",
"blocks",
"{",
"if",
"block",
"!=",
"nil",
"{",
"if",
"err",
":=",
"block",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"level",
".",
"Warn",
"(",
"db",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"filepath",
".",
"Join",
"(",
"db",
".",
"dir",
",",
"ulid",
".",
"String",
"(",
")",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"ulid",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// deleteBlocks closes and deletes blocks from the disk.
// When the map contains a non-nil block object, it means it is loaded in memory,
// so it needs to be closed first as it might need to wait for pending readers to complete.
|
[
"deleteBlocks",
"closes",
"and",
"deletes",
"blocks",
"from",
"the",
"disk",
".",
"When",
"the",
"map",
"contains",
"a",
"non",
"nil",
"block",
"object",
"it",
"means",
"it",
"is",
"loaded",
"in",
"memory",
"so",
"needs",
"to",
"be",
"closed",
"first",
"as",
"it",
"might",
"need",
"to",
"wait",
"for",
"pending",
"readers",
"to",
"complete",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L706-L718
|
train
|
prometheus/tsdb
|
db.go
|
validateBlockSequence
|
func validateBlockSequence(bs []*Block) error {
if len(bs) <= 1 {
return nil
}
var metas []BlockMeta
for _, b := range bs {
metas = append(metas, b.meta)
}
overlaps := OverlappingBlocks(metas)
if len(overlaps) > 0 {
return errors.Errorf("block time ranges overlap: %s", overlaps)
}
return nil
}
|
go
|
func validateBlockSequence(bs []*Block) error {
if len(bs) <= 1 {
return nil
}
var metas []BlockMeta
for _, b := range bs {
metas = append(metas, b.meta)
}
overlaps := OverlappingBlocks(metas)
if len(overlaps) > 0 {
return errors.Errorf("block time ranges overlap: %s", overlaps)
}
return nil
}
|
[
"func",
"validateBlockSequence",
"(",
"bs",
"[",
"]",
"*",
"Block",
")",
"error",
"{",
"if",
"len",
"(",
"bs",
")",
"<=",
"1",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"var",
"metas",
"[",
"]",
"BlockMeta",
"\n",
"for",
"_",
",",
"b",
":=",
"range",
"bs",
"{",
"metas",
"=",
"append",
"(",
"metas",
",",
"b",
".",
"meta",
")",
"\n",
"}",
"\n\n",
"overlaps",
":=",
"OverlappingBlocks",
"(",
"metas",
")",
"\n",
"if",
"len",
"(",
"overlaps",
")",
">",
"0",
"{",
"return",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"overlaps",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// validateBlockSequence returns an error if the given block meta files indicate that some blocks overlap within the sequence.
|
[
"validateBlockSequence",
"returns",
"error",
"if",
"given",
"block",
"meta",
"files",
"indicate",
"that",
"some",
"blocks",
"overlaps",
"within",
"sequence",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L721-L737
|
train
|
prometheus/tsdb
|
db.go
|
String
|
func (o Overlaps) String() string {
var res []string
for r, overlaps := range o {
var groups []string
for _, m := range overlaps {
groups = append(groups, fmt.Sprintf(
"<ulid: %s, mint: %d, maxt: %d, range: %s>",
m.ULID.String(),
m.MinTime,
m.MaxTime,
(time.Duration((m.MaxTime-m.MinTime)/1000)*time.Second).String(),
))
}
res = append(res, fmt.Sprintf(
"[mint: %d, maxt: %d, range: %s, blocks: %d]: %s",
r.Min, r.Max,
(time.Duration((r.Max-r.Min)/1000)*time.Second).String(),
len(overlaps),
strings.Join(groups, ", ")),
)
}
return strings.Join(res, "\n")
}
|
go
|
func (o Overlaps) String() string {
var res []string
for r, overlaps := range o {
var groups []string
for _, m := range overlaps {
groups = append(groups, fmt.Sprintf(
"<ulid: %s, mint: %d, maxt: %d, range: %s>",
m.ULID.String(),
m.MinTime,
m.MaxTime,
(time.Duration((m.MaxTime-m.MinTime)/1000)*time.Second).String(),
))
}
res = append(res, fmt.Sprintf(
"[mint: %d, maxt: %d, range: %s, blocks: %d]: %s",
r.Min, r.Max,
(time.Duration((r.Max-r.Min)/1000)*time.Second).String(),
len(overlaps),
strings.Join(groups, ", ")),
)
}
return strings.Join(res, "\n")
}
|
[
"func",
"(",
"o",
"Overlaps",
")",
"String",
"(",
")",
"string",
"{",
"var",
"res",
"[",
"]",
"string",
"\n",
"for",
"r",
",",
"overlaps",
":=",
"range",
"o",
"{",
"var",
"groups",
"[",
"]",
"string",
"\n",
"for",
"_",
",",
"m",
":=",
"range",
"overlaps",
"{",
"groups",
"=",
"append",
"(",
"groups",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"m",
".",
"ULID",
".",
"String",
"(",
")",
",",
"m",
".",
"MinTime",
",",
"m",
".",
"MaxTime",
",",
"(",
"time",
".",
"Duration",
"(",
"(",
"m",
".",
"MaxTime",
"-",
"m",
".",
"MinTime",
")",
"/",
"1000",
")",
"*",
"time",
".",
"Second",
")",
".",
"String",
"(",
")",
",",
")",
")",
"\n",
"}",
"\n",
"res",
"=",
"append",
"(",
"res",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"r",
".",
"Min",
",",
"r",
".",
"Max",
",",
"(",
"time",
".",
"Duration",
"(",
"(",
"r",
".",
"Max",
"-",
"r",
".",
"Min",
")",
"/",
"1000",
")",
"*",
"time",
".",
"Second",
")",
".",
"String",
"(",
")",
",",
"len",
"(",
"overlaps",
")",
",",
"strings",
".",
"Join",
"(",
"groups",
",",
"\"",
"\"",
")",
")",
",",
")",
"\n",
"}",
"\n",
"return",
"strings",
".",
"Join",
"(",
"res",
",",
"\"",
"\\n",
"\"",
")",
"\n",
"}"
] |
// String returns human readable string form of overlapped blocks.
|
[
"String",
"returns",
"human",
"readable",
"string",
"form",
"of",
"overlapped",
"blocks",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L748-L770
|
train
|
prometheus/tsdb
|
db.go
|
OverlappingBlocks
|
func OverlappingBlocks(bm []BlockMeta) Overlaps {
if len(bm) <= 1 {
return nil
}
var (
overlaps [][]BlockMeta
// pending contains not ended blocks in regards to "current" timestamp.
pending = []BlockMeta{bm[0]}
// continuousPending helps to aggregate same overlaps to single group.
continuousPending = true
)
// We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp.
// We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current
// timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending.
for _, b := range bm[1:] {
var newPending []BlockMeta
for _, p := range pending {
// "b.MinTime" is our current time.
if b.MinTime >= p.MaxTime {
continuousPending = false
continue
}
// "p" overlaps with "b" and "p" is still pending.
newPending = append(newPending, p)
}
// Our block "b" is now pending.
pending = append(newPending, b)
if len(newPending) == 0 {
// No overlaps.
continue
}
if continuousPending && len(overlaps) > 0 {
overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b)
continue
}
overlaps = append(overlaps, append(newPending, b))
// Start new pendings.
continuousPending = true
}
// Fetch the critical overlapped time range foreach overlap groups.
overlapGroups := Overlaps{}
for _, overlap := range overlaps {
minRange := TimeRange{Min: 0, Max: math.MaxInt64}
for _, b := range overlap {
if minRange.Max > b.MaxTime {
minRange.Max = b.MaxTime
}
if minRange.Min < b.MinTime {
minRange.Min = b.MinTime
}
}
overlapGroups[minRange] = overlap
}
return overlapGroups
}
|
go
|
func OverlappingBlocks(bm []BlockMeta) Overlaps {
if len(bm) <= 1 {
return nil
}
var (
overlaps [][]BlockMeta
// pending contains not ended blocks in regards to "current" timestamp.
pending = []BlockMeta{bm[0]}
// continuousPending helps to aggregate same overlaps to single group.
continuousPending = true
)
// We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp.
// We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current
// timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending.
for _, b := range bm[1:] {
var newPending []BlockMeta
for _, p := range pending {
// "b.MinTime" is our current time.
if b.MinTime >= p.MaxTime {
continuousPending = false
continue
}
// "p" overlaps with "b" and "p" is still pending.
newPending = append(newPending, p)
}
// Our block "b" is now pending.
pending = append(newPending, b)
if len(newPending) == 0 {
// No overlaps.
continue
}
if continuousPending && len(overlaps) > 0 {
overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b)
continue
}
overlaps = append(overlaps, append(newPending, b))
// Start new pendings.
continuousPending = true
}
// Fetch the critical overlapped time range foreach overlap groups.
overlapGroups := Overlaps{}
for _, overlap := range overlaps {
minRange := TimeRange{Min: 0, Max: math.MaxInt64}
for _, b := range overlap {
if minRange.Max > b.MaxTime {
minRange.Max = b.MaxTime
}
if minRange.Min < b.MinTime {
minRange.Min = b.MinTime
}
}
overlapGroups[minRange] = overlap
}
return overlapGroups
}
|
[
"func",
"OverlappingBlocks",
"(",
"bm",
"[",
"]",
"BlockMeta",
")",
"Overlaps",
"{",
"if",
"len",
"(",
"bm",
")",
"<=",
"1",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"var",
"(",
"overlaps",
"[",
"]",
"[",
"]",
"BlockMeta",
"\n\n",
"// pending contains not ended blocks in regards to \"current\" timestamp.",
"pending",
"=",
"[",
"]",
"BlockMeta",
"{",
"bm",
"[",
"0",
"]",
"}",
"\n",
"// continuousPending helps to aggregate same overlaps to single group.",
"continuousPending",
"=",
"true",
"\n",
")",
"\n\n",
"// We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our \"current\" timestamp.",
"// We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current",
"// timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending.",
"for",
"_",
",",
"b",
":=",
"range",
"bm",
"[",
"1",
":",
"]",
"{",
"var",
"newPending",
"[",
"]",
"BlockMeta",
"\n\n",
"for",
"_",
",",
"p",
":=",
"range",
"pending",
"{",
"// \"b.MinTime\" is our current time.",
"if",
"b",
".",
"MinTime",
">=",
"p",
".",
"MaxTime",
"{",
"continuousPending",
"=",
"false",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// \"p\" overlaps with \"b\" and \"p\" is still pending.",
"newPending",
"=",
"append",
"(",
"newPending",
",",
"p",
")",
"\n",
"}",
"\n\n",
"// Our block \"b\" is now pending.",
"pending",
"=",
"append",
"(",
"newPending",
",",
"b",
")",
"\n",
"if",
"len",
"(",
"newPending",
")",
"==",
"0",
"{",
"// No overlaps.",
"continue",
"\n",
"}",
"\n\n",
"if",
"continuousPending",
"&&",
"len",
"(",
"overlaps",
")",
">",
"0",
"{",
"overlaps",
"[",
"len",
"(",
"overlaps",
")",
"-",
"1",
"]",
"=",
"append",
"(",
"overlaps",
"[",
"len",
"(",
"overlaps",
")",
"-",
"1",
"]",
",",
"b",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"overlaps",
"=",
"append",
"(",
"overlaps",
",",
"append",
"(",
"newPending",
",",
"b",
")",
")",
"\n",
"// Start new pendings.",
"continuousPending",
"=",
"true",
"\n",
"}",
"\n\n",
"// Fetch the critical overlapped time range foreach overlap groups.",
"overlapGroups",
":=",
"Overlaps",
"{",
"}",
"\n",
"for",
"_",
",",
"overlap",
":=",
"range",
"overlaps",
"{",
"minRange",
":=",
"TimeRange",
"{",
"Min",
":",
"0",
",",
"Max",
":",
"math",
".",
"MaxInt64",
"}",
"\n",
"for",
"_",
",",
"b",
":=",
"range",
"overlap",
"{",
"if",
"minRange",
".",
"Max",
">",
"b",
".",
"MaxTime",
"{",
"minRange",
".",
"Max",
"=",
"b",
".",
"MaxTime",
"\n",
"}",
"\n\n",
"if",
"minRange",
".",
"Min",
"<",
"b",
".",
"MinTime",
"{",
"minRange",
".",
"Min",
"=",
"b",
".",
"MinTime",
"\n",
"}",
"\n",
"}",
"\n",
"overlapGroups",
"[",
"minRange",
"]",
"=",
"overlap",
"\n",
"}",
"\n\n",
"return",
"overlapGroups",
"\n",
"}"
] |
// OverlappingBlocks returns all overlapping blocks from given meta files.
|
[
"OverlappingBlocks",
"returns",
"all",
"overlapping",
"blocks",
"from",
"given",
"meta",
"files",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L773-L837
|
train
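
A minimal usage sketch for OverlappingBlocks, not taken from the dataset: the metas below are hypothetical and, as the function expects, sorted by MinTime; BlockMeta, Overlaps and TimeRange are the exported types referenced in the records above.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb"
)

func main() {
	// Hypothetical block metas, sorted by MinTime as OverlappingBlocks expects.
	metas := []tsdb.BlockMeta{
		{MinTime: 0, MaxTime: 50},
		{MinTime: 50, MaxTime: 100},
		{MinTime: 75, MaxTime: 150}, // overlaps the previous block on [75, 100)
		{MinTime: 150, MaxTime: 200},
	}
	// Each key is the critical overlapped time range, the value the blocks in it.
	for r, group := range tsdb.OverlappingBlocks(metas) {
		fmt.Printf("overlap in [%d, %d): %d blocks\n", r.Min, r.Max, len(group))
	}
}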
|
prometheus/tsdb
|
db.go
|
Blocks
|
func (db *DB) Blocks() []*Block {
db.mtx.RLock()
defer db.mtx.RUnlock()
return db.blocks
}
|
go
|
func (db *DB) Blocks() []*Block {
db.mtx.RLock()
defer db.mtx.RUnlock()
return db.blocks
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"Blocks",
"(",
")",
"[",
"]",
"*",
"Block",
"{",
"db",
".",
"mtx",
".",
"RLock",
"(",
")",
"\n",
"defer",
"db",
".",
"mtx",
".",
"RUnlock",
"(",
")",
"\n\n",
"return",
"db",
".",
"blocks",
"\n",
"}"
] |
// Blocks returns the database's persisted blocks.
|
[
"Blocks",
"returns",
"the",
"databases",
"persisted",
"blocks",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L844-L849
|
train
|
prometheus/tsdb
|
db.go
|
Close
|
func (db *DB) Close() error {
close(db.stopc)
db.compactCancel()
<-db.donec
db.mtx.Lock()
defer db.mtx.Unlock()
var g errgroup.Group
// blocks also contains all head blocks.
for _, pb := range db.blocks {
g.Go(pb.Close)
}
var merr tsdb_errors.MultiError
merr.Add(g.Wait())
if db.lockf != nil {
merr.Add(db.lockf.Release())
}
merr.Add(db.head.Close())
return merr.Err()
}
|
go
|
func (db *DB) Close() error {
close(db.stopc)
db.compactCancel()
<-db.donec
db.mtx.Lock()
defer db.mtx.Unlock()
var g errgroup.Group
// blocks also contains all head blocks.
for _, pb := range db.blocks {
g.Go(pb.Close)
}
var merr tsdb_errors.MultiError
merr.Add(g.Wait())
if db.lockf != nil {
merr.Add(db.lockf.Release())
}
merr.Add(db.head.Close())
return merr.Err()
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"Close",
"(",
")",
"error",
"{",
"close",
"(",
"db",
".",
"stopc",
")",
"\n",
"db",
".",
"compactCancel",
"(",
")",
"\n",
"<-",
"db",
".",
"donec",
"\n\n",
"db",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"db",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"var",
"g",
"errgroup",
".",
"Group",
"\n\n",
"// blocks also contains all head blocks.",
"for",
"_",
",",
"pb",
":=",
"range",
"db",
".",
"blocks",
"{",
"g",
".",
"Go",
"(",
"pb",
".",
"Close",
")",
"\n",
"}",
"\n\n",
"var",
"merr",
"tsdb_errors",
".",
"MultiError",
"\n\n",
"merr",
".",
"Add",
"(",
"g",
".",
"Wait",
"(",
")",
")",
"\n\n",
"if",
"db",
".",
"lockf",
"!=",
"nil",
"{",
"merr",
".",
"Add",
"(",
"db",
".",
"lockf",
".",
"Release",
"(",
")",
")",
"\n",
"}",
"\n",
"merr",
".",
"Add",
"(",
"db",
".",
"head",
".",
"Close",
"(",
")",
")",
"\n",
"return",
"merr",
".",
"Err",
"(",
")",
"\n",
"}"
] |
// Close the partition.
|
[
"Close",
"the",
"partition",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L857-L881
|
train
|
prometheus/tsdb
|
db.go
|
DisableCompactions
|
func (db *DB) DisableCompactions() {
db.autoCompactMtx.Lock()
defer db.autoCompactMtx.Unlock()
db.autoCompact = false
level.Info(db.logger).Log("msg", "compactions disabled")
}
|
go
|
func (db *DB) DisableCompactions() {
db.autoCompactMtx.Lock()
defer db.autoCompactMtx.Unlock()
db.autoCompact = false
level.Info(db.logger).Log("msg", "compactions disabled")
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"DisableCompactions",
"(",
")",
"{",
"db",
".",
"autoCompactMtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"db",
".",
"autoCompactMtx",
".",
"Unlock",
"(",
")",
"\n\n",
"db",
".",
"autoCompact",
"=",
"false",
"\n",
"level",
".",
"Info",
"(",
"db",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"}"
] |
// DisableCompactions disables auto compactions.
|
[
"DisableCompactions",
"disables",
"auto",
"compactions",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L884-L890
|
train
|
prometheus/tsdb
|
db.go
|
EnableCompactions
|
func (db *DB) EnableCompactions() {
db.autoCompactMtx.Lock()
defer db.autoCompactMtx.Unlock()
db.autoCompact = true
level.Info(db.logger).Log("msg", "compactions enabled")
}
|
go
|
func (db *DB) EnableCompactions() {
db.autoCompactMtx.Lock()
defer db.autoCompactMtx.Unlock()
db.autoCompact = true
level.Info(db.logger).Log("msg", "compactions enabled")
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"EnableCompactions",
"(",
")",
"{",
"db",
".",
"autoCompactMtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"db",
".",
"autoCompactMtx",
".",
"Unlock",
"(",
")",
"\n\n",
"db",
".",
"autoCompact",
"=",
"true",
"\n",
"level",
".",
"Info",
"(",
"db",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"}"
] |
// EnableCompactions enables auto compactions.
|
[
"EnableCompactions",
"enables",
"auto",
"compactions",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L893-L899
|
train
|
prometheus/tsdb
|
db.go
|
Querier
|
func (db *DB) Querier(mint, maxt int64) (Querier, error) {
var blocks []BlockReader
var blockMetas []BlockMeta
db.mtx.RLock()
defer db.mtx.RUnlock()
for _, b := range db.blocks {
if b.OverlapsClosedInterval(mint, maxt) {
blocks = append(blocks, b)
blockMetas = append(blockMetas, b.Meta())
}
}
if maxt >= db.head.MinTime() {
blocks = append(blocks, &rangeHead{
head: db.head,
mint: mint,
maxt: maxt,
})
}
blockQueriers := make([]Querier, 0, len(blocks))
for _, b := range blocks {
q, err := NewBlockQuerier(b, mint, maxt)
if err == nil {
blockQueriers = append(blockQueriers, q)
continue
}
// If we fail, all previously opened queriers must be closed.
for _, q := range blockQueriers {
q.Close()
}
return nil, errors.Wrapf(err, "open querier for block %s", b)
}
if len(OverlappingBlocks(blockMetas)) > 0 {
return &verticalQuerier{
querier: querier{
blocks: blockQueriers,
},
}, nil
}
return &querier{
blocks: blockQueriers,
}, nil
}
|
go
|
func (db *DB) Querier(mint, maxt int64) (Querier, error) {
var blocks []BlockReader
var blockMetas []BlockMeta
db.mtx.RLock()
defer db.mtx.RUnlock()
for _, b := range db.blocks {
if b.OverlapsClosedInterval(mint, maxt) {
blocks = append(blocks, b)
blockMetas = append(blockMetas, b.Meta())
}
}
if maxt >= db.head.MinTime() {
blocks = append(blocks, &rangeHead{
head: db.head,
mint: mint,
maxt: maxt,
})
}
blockQueriers := make([]Querier, 0, len(blocks))
for _, b := range blocks {
q, err := NewBlockQuerier(b, mint, maxt)
if err == nil {
blockQueriers = append(blockQueriers, q)
continue
}
// If we fail, all previously opened queriers must be closed.
for _, q := range blockQueriers {
q.Close()
}
return nil, errors.Wrapf(err, "open querier for block %s", b)
}
if len(OverlappingBlocks(blockMetas)) > 0 {
return &verticalQuerier{
querier: querier{
blocks: blockQueriers,
},
}, nil
}
return &querier{
blocks: blockQueriers,
}, nil
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"Querier",
"(",
"mint",
",",
"maxt",
"int64",
")",
"(",
"Querier",
",",
"error",
")",
"{",
"var",
"blocks",
"[",
"]",
"BlockReader",
"\n",
"var",
"blockMetas",
"[",
"]",
"BlockMeta",
"\n\n",
"db",
".",
"mtx",
".",
"RLock",
"(",
")",
"\n",
"defer",
"db",
".",
"mtx",
".",
"RUnlock",
"(",
")",
"\n\n",
"for",
"_",
",",
"b",
":=",
"range",
"db",
".",
"blocks",
"{",
"if",
"b",
".",
"OverlapsClosedInterval",
"(",
"mint",
",",
"maxt",
")",
"{",
"blocks",
"=",
"append",
"(",
"blocks",
",",
"b",
")",
"\n",
"blockMetas",
"=",
"append",
"(",
"blockMetas",
",",
"b",
".",
"Meta",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"maxt",
">=",
"db",
".",
"head",
".",
"MinTime",
"(",
")",
"{",
"blocks",
"=",
"append",
"(",
"blocks",
",",
"&",
"rangeHead",
"{",
"head",
":",
"db",
".",
"head",
",",
"mint",
":",
"mint",
",",
"maxt",
":",
"maxt",
",",
"}",
")",
"\n",
"}",
"\n\n",
"blockQueriers",
":=",
"make",
"(",
"[",
"]",
"Querier",
",",
"0",
",",
"len",
"(",
"blocks",
")",
")",
"\n",
"for",
"_",
",",
"b",
":=",
"range",
"blocks",
"{",
"q",
",",
"err",
":=",
"NewBlockQuerier",
"(",
"b",
",",
"mint",
",",
"maxt",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"blockQueriers",
"=",
"append",
"(",
"blockQueriers",
",",
"q",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"// If we fail, all previously opened queriers must be closed.",
"for",
"_",
",",
"q",
":=",
"range",
"blockQueriers",
"{",
"q",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"return",
"nil",
",",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"b",
")",
"\n",
"}",
"\n\n",
"if",
"len",
"(",
"OverlappingBlocks",
"(",
"blockMetas",
")",
")",
">",
"0",
"{",
"return",
"&",
"verticalQuerier",
"{",
"querier",
":",
"querier",
"{",
"blocks",
":",
"blockQueriers",
",",
"}",
",",
"}",
",",
"nil",
"\n",
"}",
"\n\n",
"return",
"&",
"querier",
"{",
"blocks",
":",
"blockQueriers",
",",
"}",
",",
"nil",
"\n",
"}"
] |
// Querier returns a new querier over the data partition for the given time range.
// A goroutine must not handle more than one open Querier.
|
[
"Querier",
"returns",
"a",
"new",
"querier",
"over",
"the",
"data",
"partition",
"for",
"the",
"given",
"time",
"range",
".",
"A",
"goroutine",
"must",
"not",
"handle",
"more",
"than",
"one",
"open",
"Querier",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L933-L979
|
train
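
A hedged sketch of calling DB.Querier from outside the package; tsdb.Open's signature and tsdb.DefaultOptions are assumptions not shown in these records, and the directory path is hypothetical.

package main

import (
	"log"

	"github.com/prometheus/tsdb"
)

func main() {
	// Open is assumed to take (dir, logger, registerer, options); nil values
	// fall back to package defaults.
	db, err := tsdb.Open("data/", nil, nil, tsdb.DefaultOptions)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Query the closed interval [1000, 2000]. The querier must be closed by
	// the caller and must not be shared by more than one goroutine.
	q, err := db.Querier(1000, 2000)
	if err != nil {
		log.Fatal(err)
	}
	defer q.Close()
}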
|
prometheus/tsdb
|
db.go
|
Delete
|
func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error {
db.cmtx.Lock()
defer db.cmtx.Unlock()
var g errgroup.Group
db.mtx.RLock()
defer db.mtx.RUnlock()
for _, b := range db.blocks {
if b.OverlapsClosedInterval(mint, maxt) {
g.Go(func(b *Block) func() error {
return func() error { return b.Delete(mint, maxt, ms...) }
}(b))
}
}
g.Go(func() error {
return db.head.Delete(mint, maxt, ms...)
})
return g.Wait()
}
|
go
|
func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error {
db.cmtx.Lock()
defer db.cmtx.Unlock()
var g errgroup.Group
db.mtx.RLock()
defer db.mtx.RUnlock()
for _, b := range db.blocks {
if b.OverlapsClosedInterval(mint, maxt) {
g.Go(func(b *Block) func() error {
return func() error { return b.Delete(mint, maxt, ms...) }
}(b))
}
}
g.Go(func() error {
return db.head.Delete(mint, maxt, ms...)
})
return g.Wait()
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"Delete",
"(",
"mint",
",",
"maxt",
"int64",
",",
"ms",
"...",
"labels",
".",
"Matcher",
")",
"error",
"{",
"db",
".",
"cmtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"db",
".",
"cmtx",
".",
"Unlock",
"(",
")",
"\n\n",
"var",
"g",
"errgroup",
".",
"Group",
"\n\n",
"db",
".",
"mtx",
".",
"RLock",
"(",
")",
"\n",
"defer",
"db",
".",
"mtx",
".",
"RUnlock",
"(",
")",
"\n\n",
"for",
"_",
",",
"b",
":=",
"range",
"db",
".",
"blocks",
"{",
"if",
"b",
".",
"OverlapsClosedInterval",
"(",
"mint",
",",
"maxt",
")",
"{",
"g",
".",
"Go",
"(",
"func",
"(",
"b",
"*",
"Block",
")",
"func",
"(",
")",
"error",
"{",
"return",
"func",
"(",
")",
"error",
"{",
"return",
"b",
".",
"Delete",
"(",
"mint",
",",
"maxt",
",",
"ms",
"...",
")",
"}",
"\n",
"}",
"(",
"b",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"g",
".",
"Go",
"(",
"func",
"(",
")",
"error",
"{",
"return",
"db",
".",
"head",
".",
"Delete",
"(",
"mint",
",",
"maxt",
",",
"ms",
"...",
")",
"\n",
"}",
")",
"\n",
"return",
"g",
".",
"Wait",
"(",
")",
"\n",
"}"
] |
// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
|
[
"Delete",
"implements",
"deletion",
"of",
"metrics",
".",
"It",
"only",
"has",
"atomicity",
"guarantees",
"on",
"a",
"per",
"-",
"block",
"basis",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L986-L1006
|
train
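
A sketch of DB.Delete usage under the same assumptions as the Querier example above (tsdb.Open signature, hypothetical directory); labels.NewEqualMatcher is assumed to exist in the tsdb/labels package.

package main

import (
	"log"
	"math"

	"github.com/prometheus/tsdb"
	"github.com/prometheus/tsdb/labels"
)

func main() {
	db, err := tsdb.Open("data/", nil, nil, tsdb.DefaultOptions)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Delete all samples of the hypothetical series job="demo" over all time.
	// Deletion is only atomic per block, as the docstring above notes.
	m := labels.NewEqualMatcher("job", "demo")
	if err := db.Delete(math.MinInt64, math.MaxInt64, m); err != nil {
		log.Fatal(err)
	}
}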
|
prometheus/tsdb
|
db.go
|
CleanTombstones
|
func (db *DB) CleanTombstones() (err error) {
db.cmtx.Lock()
defer db.cmtx.Unlock()
start := time.Now()
defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
newUIDs := []ulid.ULID{}
defer func() {
// If any error is caused, we need to delete all the new directory created.
if err != nil {
for _, uid := range newUIDs {
dir := filepath.Join(db.Dir(), uid.String())
if err := os.RemoveAll(dir); err != nil {
level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
}
}
}
}()
db.mtx.RLock()
blocks := db.blocks[:]
db.mtx.RUnlock()
for _, b := range blocks {
if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil {
err = errors.Wrapf(er, "clean tombstones: %s", b.Dir())
return err
} else if uid != nil { // New block was created.
newUIDs = append(newUIDs, *uid)
}
}
return errors.Wrap(db.reload(), "reload blocks")
}
|
go
|
func (db *DB) CleanTombstones() (err error) {
db.cmtx.Lock()
defer db.cmtx.Unlock()
start := time.Now()
defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
newUIDs := []ulid.ULID{}
defer func() {
// If any error is caused, we need to delete all the new directory created.
if err != nil {
for _, uid := range newUIDs {
dir := filepath.Join(db.Dir(), uid.String())
if err := os.RemoveAll(dir); err != nil {
level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
}
}
}
}()
db.mtx.RLock()
blocks := db.blocks[:]
db.mtx.RUnlock()
for _, b := range blocks {
if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil {
err = errors.Wrapf(er, "clean tombstones: %s", b.Dir())
return err
} else if uid != nil { // New block was created.
newUIDs = append(newUIDs, *uid)
}
}
return errors.Wrap(db.reload(), "reload blocks")
}
|
[
"func",
"(",
"db",
"*",
"DB",
")",
"CleanTombstones",
"(",
")",
"(",
"err",
"error",
")",
"{",
"db",
".",
"cmtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"db",
".",
"cmtx",
".",
"Unlock",
"(",
")",
"\n\n",
"start",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"defer",
"db",
".",
"metrics",
".",
"tombCleanTimer",
".",
"Observe",
"(",
"time",
".",
"Since",
"(",
"start",
")",
".",
"Seconds",
"(",
")",
")",
"\n\n",
"newUIDs",
":=",
"[",
"]",
"ulid",
".",
"ULID",
"{",
"}",
"\n",
"defer",
"func",
"(",
")",
"{",
"// If any error is caused, we need to delete all the new directory created.",
"if",
"err",
"!=",
"nil",
"{",
"for",
"_",
",",
"uid",
":=",
"range",
"newUIDs",
"{",
"dir",
":=",
"filepath",
".",
"Join",
"(",
"db",
".",
"Dir",
"(",
")",
",",
"uid",
".",
"String",
"(",
")",
")",
"\n",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"dir",
")",
";",
"err",
"!=",
"nil",
"{",
"level",
".",
"Error",
"(",
"db",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"dir",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"db",
".",
"mtx",
".",
"RLock",
"(",
")",
"\n",
"blocks",
":=",
"db",
".",
"blocks",
"[",
":",
"]",
"\n",
"db",
".",
"mtx",
".",
"RUnlock",
"(",
")",
"\n\n",
"for",
"_",
",",
"b",
":=",
"range",
"blocks",
"{",
"if",
"uid",
",",
"er",
":=",
"b",
".",
"CleanTombstones",
"(",
"db",
".",
"Dir",
"(",
")",
",",
"db",
".",
"compactor",
")",
";",
"er",
"!=",
"nil",
"{",
"err",
"=",
"errors",
".",
"Wrapf",
"(",
"er",
",",
"\"",
"\"",
",",
"b",
".",
"Dir",
"(",
")",
")",
"\n",
"return",
"err",
"\n",
"}",
"else",
"if",
"uid",
"!=",
"nil",
"{",
"// New block was created.",
"newUIDs",
"=",
"append",
"(",
"newUIDs",
",",
"*",
"uid",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"errors",
".",
"Wrap",
"(",
"db",
".",
"reload",
"(",
")",
",",
"\"",
"\"",
")",
"\n",
"}"
] |
// CleanTombstones re-writes any blocks with tombstones.
|
[
"CleanTombstones",
"re",
"-",
"writes",
"any",
"blocks",
"with",
"tombstones",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L1009-L1042
|
train
|
prometheus/tsdb
|
compact.go
|
ExponentialBlockRanges
|
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
ranges := make([]int64, 0, steps)
curRange := minSize
for i := 0; i < steps; i++ {
ranges = append(ranges, curRange)
curRange = curRange * int64(stepSize)
}
return ranges
}
|
go
|
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
ranges := make([]int64, 0, steps)
curRange := minSize
for i := 0; i < steps; i++ {
ranges = append(ranges, curRange)
curRange = curRange * int64(stepSize)
}
return ranges
}
|
[
"func",
"ExponentialBlockRanges",
"(",
"minSize",
"int64",
",",
"steps",
",",
"stepSize",
"int",
")",
"[",
"]",
"int64",
"{",
"ranges",
":=",
"make",
"(",
"[",
"]",
"int64",
",",
"0",
",",
"steps",
")",
"\n",
"curRange",
":=",
"minSize",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"steps",
";",
"i",
"++",
"{",
"ranges",
"=",
"append",
"(",
"ranges",
",",
"curRange",
")",
"\n",
"curRange",
"=",
"curRange",
"*",
"int64",
"(",
"stepSize",
")",
"\n",
"}",
"\n\n",
"return",
"ranges",
"\n",
"}"
] |
// ExponentialBlockRanges returns the time ranges based on the stepSize.
|
[
"ExponentialBlockRanges",
"returns",
"the",
"time",
"ranges",
"based",
"on",
"the",
"stepSize",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L41-L50
|
train
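
A small sketch showing the values ExponentialBlockRanges produces for a 2-hour base range; the numbers in the comment follow directly from the multiplication in the function.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb"
)

func main() {
	// 2h base range in milliseconds, 5 steps, multiplier 3 yields
	// [7200000 21600000 64800000 194400000 583200000].
	ranges := tsdb.ExponentialBlockRanges(2*60*60*1000, 5, 3)
	fmt.Println(ranges)
}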
|
prometheus/tsdb
|
compact.go
|
NewLeveledCompactor
|
func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) {
if len(ranges) == 0 {
return nil, errors.Errorf("at least one range must be provided")
}
if pool == nil {
pool = chunkenc.NewPool()
}
if l == nil {
l = log.NewNopLogger()
}
return &LeveledCompactor{
ranges: ranges,
chunkPool: pool,
logger: l,
metrics: newCompactorMetrics(r),
ctx: ctx,
}, nil
}
|
go
|
func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) {
if len(ranges) == 0 {
return nil, errors.Errorf("at least one range must be provided")
}
if pool == nil {
pool = chunkenc.NewPool()
}
if l == nil {
l = log.NewNopLogger()
}
return &LeveledCompactor{
ranges: ranges,
chunkPool: pool,
logger: l,
metrics: newCompactorMetrics(r),
ctx: ctx,
}, nil
}
|
[
"func",
"NewLeveledCompactor",
"(",
"ctx",
"context",
".",
"Context",
",",
"r",
"prometheus",
".",
"Registerer",
",",
"l",
"log",
".",
"Logger",
",",
"ranges",
"[",
"]",
"int64",
",",
"pool",
"chunkenc",
".",
"Pool",
")",
"(",
"*",
"LeveledCompactor",
",",
"error",
")",
"{",
"if",
"len",
"(",
"ranges",
")",
"==",
"0",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"pool",
"==",
"nil",
"{",
"pool",
"=",
"chunkenc",
".",
"NewPool",
"(",
")",
"\n",
"}",
"\n",
"if",
"l",
"==",
"nil",
"{",
"l",
"=",
"log",
".",
"NewNopLogger",
"(",
")",
"\n",
"}",
"\n",
"return",
"&",
"LeveledCompactor",
"{",
"ranges",
":",
"ranges",
",",
"chunkPool",
":",
"pool",
",",
"logger",
":",
"l",
",",
"metrics",
":",
"newCompactorMetrics",
"(",
"r",
")",
",",
"ctx",
":",
"ctx",
",",
"}",
",",
"nil",
"\n",
"}"
] |
// NewLeveledCompactor returns a LeveledCompactor.
|
[
"NewLeveledCompactor",
"returns",
"a",
"LeveledCompactor",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L151-L168
|
train
|
prometheus/tsdb
|
compact.go
|
Plan
|
func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
dirs, err := blockDirs(dir)
if err != nil {
return nil, err
}
if len(dirs) < 1 {
return nil, nil
}
var dms []dirMeta
for _, dir := range dirs {
meta, err := readMetaFile(dir)
if err != nil {
return nil, err
}
dms = append(dms, dirMeta{dir, meta})
}
return c.plan(dms)
}
|
go
|
func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
dirs, err := blockDirs(dir)
if err != nil {
return nil, err
}
if len(dirs) < 1 {
return nil, nil
}
var dms []dirMeta
for _, dir := range dirs {
meta, err := readMetaFile(dir)
if err != nil {
return nil, err
}
dms = append(dms, dirMeta{dir, meta})
}
return c.plan(dms)
}
|
[
"func",
"(",
"c",
"*",
"LeveledCompactor",
")",
"Plan",
"(",
"dir",
"string",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"dirs",
",",
"err",
":=",
"blockDirs",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"len",
"(",
"dirs",
")",
"<",
"1",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"var",
"dms",
"[",
"]",
"dirMeta",
"\n",
"for",
"_",
",",
"dir",
":=",
"range",
"dirs",
"{",
"meta",
",",
"err",
":=",
"readMetaFile",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"dms",
"=",
"append",
"(",
"dms",
",",
"dirMeta",
"{",
"dir",
",",
"meta",
"}",
")",
"\n",
"}",
"\n",
"return",
"c",
".",
"plan",
"(",
"dms",
")",
"\n",
"}"
] |
// Plan returns a list of compactable blocks in the provided directory.
|
[
"Plan",
"returns",
"a",
"list",
"of",
"compactable",
"blocks",
"in",
"the",
"provided",
"directory",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L176-L194
|
train
|
prometheus/tsdb
|
compact.go
|
selectDirs
|
func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta {
if len(c.ranges) < 2 || len(ds) < 1 {
return nil
}
highTime := ds[len(ds)-1].meta.MinTime
for _, iv := range c.ranges[1:] {
parts := splitByRange(ds, iv)
if len(parts) == 0 {
continue
}
Outer:
for _, p := range parts {
// Do not select the range if it has a block whose compaction failed.
for _, dm := range p {
if dm.meta.Compaction.Failed {
continue Outer
}
}
mint := p[0].meta.MinTime
maxt := p[len(p)-1].meta.MaxTime
// Pick the range of blocks if it spans the full range (potentially with gaps)
// or is before the most recent block.
// This ensures we don't compact blocks prematurely when another one of the same
// size still fits in the range.
if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 {
return p
}
}
}
return nil
}
|
go
|
func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta {
if len(c.ranges) < 2 || len(ds) < 1 {
return nil
}
highTime := ds[len(ds)-1].meta.MinTime
for _, iv := range c.ranges[1:] {
parts := splitByRange(ds, iv)
if len(parts) == 0 {
continue
}
Outer:
for _, p := range parts {
// Do not select the range if it has a block whose compaction failed.
for _, dm := range p {
if dm.meta.Compaction.Failed {
continue Outer
}
}
mint := p[0].meta.MinTime
maxt := p[len(p)-1].meta.MaxTime
// Pick the range of blocks if it spans the full range (potentially with gaps)
// or is before the most recent block.
// This ensures we don't compact blocks prematurely when another one of the same
// size still fits in the range.
if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 {
return p
}
}
}
return nil
}
|
[
"func",
"(",
"c",
"*",
"LeveledCompactor",
")",
"selectDirs",
"(",
"ds",
"[",
"]",
"dirMeta",
")",
"[",
"]",
"dirMeta",
"{",
"if",
"len",
"(",
"c",
".",
"ranges",
")",
"<",
"2",
"||",
"len",
"(",
"ds",
")",
"<",
"1",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"highTime",
":=",
"ds",
"[",
"len",
"(",
"ds",
")",
"-",
"1",
"]",
".",
"meta",
".",
"MinTime",
"\n\n",
"for",
"_",
",",
"iv",
":=",
"range",
"c",
".",
"ranges",
"[",
"1",
":",
"]",
"{",
"parts",
":=",
"splitByRange",
"(",
"ds",
",",
"iv",
")",
"\n",
"if",
"len",
"(",
"parts",
")",
"==",
"0",
"{",
"continue",
"\n",
"}",
"\n\n",
"Outer",
":",
"for",
"_",
",",
"p",
":=",
"range",
"parts",
"{",
"// Do not select the range if it has a block whose compaction failed.",
"for",
"_",
",",
"dm",
":=",
"range",
"p",
"{",
"if",
"dm",
".",
"meta",
".",
"Compaction",
".",
"Failed",
"{",
"continue",
"Outer",
"\n",
"}",
"\n",
"}",
"\n\n",
"mint",
":=",
"p",
"[",
"0",
"]",
".",
"meta",
".",
"MinTime",
"\n",
"maxt",
":=",
"p",
"[",
"len",
"(",
"p",
")",
"-",
"1",
"]",
".",
"meta",
".",
"MaxTime",
"\n",
"// Pick the range of blocks if it spans the full range (potentially with gaps)",
"// or is before the most recent block.",
"// This ensures we don't compact blocks prematurely when another one of the same",
"// size still fits in the range.",
"if",
"(",
"maxt",
"-",
"mint",
"==",
"iv",
"||",
"maxt",
"<=",
"highTime",
")",
"&&",
"len",
"(",
"p",
")",
">",
"1",
"{",
"return",
"p",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// selectDirs returns the dir metas that should be compacted into a single new block.
// If only a single block range is configured, the result is always nil.
|
[
"selectDirs",
"returns",
"the",
"dir",
"metas",
"that",
"should",
"be",
"compacted",
"into",
"a",
"single",
"new",
"block",
".",
"If",
"only",
"a",
"single",
"block",
"range",
"is",
"configured",
"the",
"result",
"is",
"always",
"nil",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L233-L268
|
train
|
prometheus/tsdb
|
compact.go
|
selectOverlappingDirs
|
func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string {
if len(ds) < 2 {
return nil
}
var overlappingDirs []string
globalMaxt := ds[0].meta.MaxTime
for i, d := range ds[1:] {
if d.meta.MinTime < globalMaxt {
if len(overlappingDirs) == 0 { // When it is the first overlap, need to add the last one as well.
overlappingDirs = append(overlappingDirs, ds[i].dir)
}
overlappingDirs = append(overlappingDirs, d.dir)
} else if len(overlappingDirs) > 0 {
break
}
if d.meta.MaxTime > globalMaxt {
globalMaxt = d.meta.MaxTime
}
}
return overlappingDirs
}
|
go
|
func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string {
if len(ds) < 2 {
return nil
}
var overlappingDirs []string
globalMaxt := ds[0].meta.MaxTime
for i, d := range ds[1:] {
if d.meta.MinTime < globalMaxt {
if len(overlappingDirs) == 0 { // When it is the first overlap, need to add the last one as well.
overlappingDirs = append(overlappingDirs, ds[i].dir)
}
overlappingDirs = append(overlappingDirs, d.dir)
} else if len(overlappingDirs) > 0 {
break
}
if d.meta.MaxTime > globalMaxt {
globalMaxt = d.meta.MaxTime
}
}
return overlappingDirs
}
|
[
"func",
"(",
"c",
"*",
"LeveledCompactor",
")",
"selectOverlappingDirs",
"(",
"ds",
"[",
"]",
"dirMeta",
")",
"[",
"]",
"string",
"{",
"if",
"len",
"(",
"ds",
")",
"<",
"2",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"var",
"overlappingDirs",
"[",
"]",
"string",
"\n",
"globalMaxt",
":=",
"ds",
"[",
"0",
"]",
".",
"meta",
".",
"MaxTime",
"\n",
"for",
"i",
",",
"d",
":=",
"range",
"ds",
"[",
"1",
":",
"]",
"{",
"if",
"d",
".",
"meta",
".",
"MinTime",
"<",
"globalMaxt",
"{",
"if",
"len",
"(",
"overlappingDirs",
")",
"==",
"0",
"{",
"// When it is the first overlap, need to add the last one as well.",
"overlappingDirs",
"=",
"append",
"(",
"overlappingDirs",
",",
"ds",
"[",
"i",
"]",
".",
"dir",
")",
"\n",
"}",
"\n",
"overlappingDirs",
"=",
"append",
"(",
"overlappingDirs",
",",
"d",
".",
"dir",
")",
"\n",
"}",
"else",
"if",
"len",
"(",
"overlappingDirs",
")",
">",
"0",
"{",
"break",
"\n",
"}",
"\n",
"if",
"d",
".",
"meta",
".",
"MaxTime",
">",
"globalMaxt",
"{",
"globalMaxt",
"=",
"d",
".",
"meta",
".",
"MaxTime",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"overlappingDirs",
"\n",
"}"
] |
// selectOverlappingDirs returns all dirs with overlapping time ranges.
// It expects sorted input by mint and returns the overlapping dirs in the same order as received.
|
[
"selectOverlappingDirs",
"returns",
"all",
"dirs",
"with",
"overlapping",
"time",
"ranges",
".",
"It",
"expects",
"sorted",
"input",
"by",
"mint",
"and",
"returns",
"the",
"overlapping",
"dirs",
"in",
"the",
"same",
"order",
"as",
"received",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L272-L292
|
train
|
prometheus/tsdb
|
tombstones.go
|
addInterval
|
func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) {
t.mtx.Lock()
defer t.mtx.Unlock()
for _, itv := range itvs {
t.intvlGroups[ref] = t.intvlGroups[ref].add(itv)
}
}
|
go
|
func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) {
t.mtx.Lock()
defer t.mtx.Unlock()
for _, itv := range itvs {
t.intvlGroups[ref] = t.intvlGroups[ref].add(itv)
}
}
|
[
"func",
"(",
"t",
"*",
"memTombstones",
")",
"addInterval",
"(",
"ref",
"uint64",
",",
"itvs",
"...",
"Interval",
")",
"{",
"t",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"t",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n",
"for",
"_",
",",
"itv",
":=",
"range",
"itvs",
"{",
"t",
".",
"intvlGroups",
"[",
"ref",
"]",
"=",
"t",
".",
"intvlGroups",
"[",
"ref",
"]",
".",
"add",
"(",
"itv",
")",
"\n",
"}",
"\n",
"}"
] |
// addInterval to an existing memTombstones
|
[
"addInterval",
"to",
"an",
"existing",
"memTombstones"
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/tombstones.go#L225-L231
|
train
|
prometheus/tsdb
|
tombstones.go
|
add
|
func (itvs Intervals) add(n Interval) Intervals {
for i, r := range itvs {
// TODO(gouthamve): Make this codepath easier to digest.
if r.inBounds(n.Mint-1) || r.inBounds(n.Mint) {
if n.Maxt > r.Maxt {
itvs[i].Maxt = n.Maxt
}
j := 0
for _, r2 := range itvs[i+1:] {
if n.Maxt < r2.Mint {
break
}
j++
}
if j != 0 {
if itvs[i+j].Maxt > n.Maxt {
itvs[i].Maxt = itvs[i+j].Maxt
}
itvs = append(itvs[:i+1], itvs[i+j+1:]...)
}
return itvs
}
if r.inBounds(n.Maxt+1) || r.inBounds(n.Maxt) {
if n.Mint < r.Maxt {
itvs[i].Mint = n.Mint
}
return itvs
}
if n.Mint < r.Mint {
newRange := make(Intervals, i, len(itvs[:i])+1)
copy(newRange, itvs[:i])
newRange = append(newRange, n)
newRange = append(newRange, itvs[i:]...)
return newRange
}
}
itvs = append(itvs, n)
return itvs
}
|
go
|
func (itvs Intervals) add(n Interval) Intervals {
for i, r := range itvs {
// TODO(gouthamve): Make this codepath easier to digest.
if r.inBounds(n.Mint-1) || r.inBounds(n.Mint) {
if n.Maxt > r.Maxt {
itvs[i].Maxt = n.Maxt
}
j := 0
for _, r2 := range itvs[i+1:] {
if n.Maxt < r2.Mint {
break
}
j++
}
if j != 0 {
if itvs[i+j].Maxt > n.Maxt {
itvs[i].Maxt = itvs[i+j].Maxt
}
itvs = append(itvs[:i+1], itvs[i+j+1:]...)
}
return itvs
}
if r.inBounds(n.Maxt+1) || r.inBounds(n.Maxt) {
if n.Mint < r.Maxt {
itvs[i].Mint = n.Mint
}
return itvs
}
if n.Mint < r.Mint {
newRange := make(Intervals, i, len(itvs[:i])+1)
copy(newRange, itvs[:i])
newRange = append(newRange, n)
newRange = append(newRange, itvs[i:]...)
return newRange
}
}
itvs = append(itvs, n)
return itvs
}
|
[
"func",
"(",
"itvs",
"Intervals",
")",
"add",
"(",
"n",
"Interval",
")",
"Intervals",
"{",
"for",
"i",
",",
"r",
":=",
"range",
"itvs",
"{",
"// TODO(gouthamve): Make this codepath easier to digest.",
"if",
"r",
".",
"inBounds",
"(",
"n",
".",
"Mint",
"-",
"1",
")",
"||",
"r",
".",
"inBounds",
"(",
"n",
".",
"Mint",
")",
"{",
"if",
"n",
".",
"Maxt",
">",
"r",
".",
"Maxt",
"{",
"itvs",
"[",
"i",
"]",
".",
"Maxt",
"=",
"n",
".",
"Maxt",
"\n",
"}",
"\n\n",
"j",
":=",
"0",
"\n",
"for",
"_",
",",
"r2",
":=",
"range",
"itvs",
"[",
"i",
"+",
"1",
":",
"]",
"{",
"if",
"n",
".",
"Maxt",
"<",
"r2",
".",
"Mint",
"{",
"break",
"\n",
"}",
"\n",
"j",
"++",
"\n",
"}",
"\n",
"if",
"j",
"!=",
"0",
"{",
"if",
"itvs",
"[",
"i",
"+",
"j",
"]",
".",
"Maxt",
">",
"n",
".",
"Maxt",
"{",
"itvs",
"[",
"i",
"]",
".",
"Maxt",
"=",
"itvs",
"[",
"i",
"+",
"j",
"]",
".",
"Maxt",
"\n",
"}",
"\n",
"itvs",
"=",
"append",
"(",
"itvs",
"[",
":",
"i",
"+",
"1",
"]",
",",
"itvs",
"[",
"i",
"+",
"j",
"+",
"1",
":",
"]",
"...",
")",
"\n",
"}",
"\n",
"return",
"itvs",
"\n",
"}",
"\n\n",
"if",
"r",
".",
"inBounds",
"(",
"n",
".",
"Maxt",
"+",
"1",
")",
"||",
"r",
".",
"inBounds",
"(",
"n",
".",
"Maxt",
")",
"{",
"if",
"n",
".",
"Mint",
"<",
"r",
".",
"Maxt",
"{",
"itvs",
"[",
"i",
"]",
".",
"Mint",
"=",
"n",
".",
"Mint",
"\n",
"}",
"\n",
"return",
"itvs",
"\n",
"}",
"\n\n",
"if",
"n",
".",
"Mint",
"<",
"r",
".",
"Mint",
"{",
"newRange",
":=",
"make",
"(",
"Intervals",
",",
"i",
",",
"len",
"(",
"itvs",
"[",
":",
"i",
"]",
")",
"+",
"1",
")",
"\n",
"copy",
"(",
"newRange",
",",
"itvs",
"[",
":",
"i",
"]",
")",
"\n",
"newRange",
"=",
"append",
"(",
"newRange",
",",
"n",
")",
"\n",
"newRange",
"=",
"append",
"(",
"newRange",
",",
"itvs",
"[",
"i",
":",
"]",
"...",
")",
"\n\n",
"return",
"newRange",
"\n",
"}",
"\n",
"}",
"\n\n",
"itvs",
"=",
"append",
"(",
"itvs",
",",
"n",
")",
"\n",
"return",
"itvs",
"\n",
"}"
] |
// add the new time-range to the existing ones.
// The existing ones must be sorted.
|
[
"add",
"the",
"new",
"time",
"-",
"range",
"to",
"the",
"existing",
"ones",
".",
"The",
"existing",
"ones",
"must",
"be",
"sorted",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/tombstones.go#L271-L314
|
train
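
A sketch of the merge behaviour of add; because the method is unexported it is written as a hypothetical helper inside the tsdb package, using the exported Interval and Intervals types.

package tsdb

import "fmt"

// exampleIntervalAdd is a hypothetical in-package helper. It shows how add
// merges a new interval into an already sorted list.
func exampleIntervalAdd() {
	itvs := Intervals{{Mint: 0, Maxt: 10}, {Mint: 20, Maxt: 30}}

	// Adding [5, 25] overlaps both existing ranges and bridges them.
	itvs = itvs.add(Interval{Mint: 5, Maxt: 25})

	fmt.Println(itvs) // [{0 30}]
}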
|
prometheus/tsdb
|
fileutil/fileutil.go
|
CopyDirs
|
func CopyDirs(src, dest string) error {
if err := os.MkdirAll(dest, 0777); err != nil {
return err
}
files, err := readDirs(src)
if err != nil {
return err
}
for _, f := range files {
dp := filepath.Join(dest, f)
sp := filepath.Join(src, f)
stat, err := os.Stat(sp)
if err != nil {
return err
}
// Empty directories are also created.
if stat.IsDir() {
if err := os.MkdirAll(dp, 0777); err != nil {
return err
}
continue
}
if err := copyFile(sp, dp); err != nil {
return err
}
}
return nil
}
|
go
|
func CopyDirs(src, dest string) error {
if err := os.MkdirAll(dest, 0777); err != nil {
return err
}
files, err := readDirs(src)
if err != nil {
return err
}
for _, f := range files {
dp := filepath.Join(dest, f)
sp := filepath.Join(src, f)
stat, err := os.Stat(sp)
if err != nil {
return err
}
// Empty directories are also created.
if stat.IsDir() {
if err := os.MkdirAll(dp, 0777); err != nil {
return err
}
continue
}
if err := copyFile(sp, dp); err != nil {
return err
}
}
return nil
}
|
[
"func",
"CopyDirs",
"(",
"src",
",",
"dest",
"string",
")",
"error",
"{",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"dest",
",",
"0777",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"files",
",",
"err",
":=",
"readDirs",
"(",
"src",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"f",
":=",
"range",
"files",
"{",
"dp",
":=",
"filepath",
".",
"Join",
"(",
"dest",
",",
"f",
")",
"\n",
"sp",
":=",
"filepath",
".",
"Join",
"(",
"src",
",",
"f",
")",
"\n\n",
"stat",
",",
"err",
":=",
"os",
".",
"Stat",
"(",
"sp",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Empty directories are also created.",
"if",
"stat",
".",
"IsDir",
"(",
")",
"{",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"dp",
",",
"0777",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"continue",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"copyFile",
"(",
"sp",
",",
"dp",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// CopyDirs copies all directories, subdirectories and files recursively including the empty folders.
// Source and destination must be full paths.
|
[
"CopyDirs",
"copies",
"all",
"directories",
"subdirectories",
"and",
"files",
"recursively",
"including",
"the",
"empty",
"folders",
".",
"Source",
"and",
"destination",
"must",
"be",
"full",
"paths",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/fileutil.go#L30-L61
|
train
|
prometheus/tsdb
|
fileutil/fileutil.go
|
readDirs
|
func readDirs(src string) ([]string, error) {
var files []string
err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, src)
if len(relativePath) > 0 {
files = append(files, relativePath)
}
return nil
})
if err != nil {
return nil, err
}
return files, nil
}
|
go
|
func readDirs(src string) ([]string, error) {
var files []string
err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, src)
if len(relativePath) > 0 {
files = append(files, relativePath)
}
return nil
})
if err != nil {
return nil, err
}
return files, nil
}
|
[
"func",
"readDirs",
"(",
"src",
"string",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"var",
"files",
"[",
"]",
"string",
"\n\n",
"err",
":=",
"filepath",
".",
"Walk",
"(",
"src",
",",
"func",
"(",
"path",
"string",
",",
"f",
"os",
".",
"FileInfo",
",",
"err",
"error",
")",
"error",
"{",
"relativePath",
":=",
"strings",
".",
"TrimPrefix",
"(",
"path",
",",
"src",
")",
"\n",
"if",
"len",
"(",
"relativePath",
")",
">",
"0",
"{",
"files",
"=",
"append",
"(",
"files",
",",
"relativePath",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"files",
",",
"nil",
"\n",
"}"
] |
// readDirs reads the source directory recursively and
// returns relative paths to all files and empty directories.
|
[
"readDirs",
"reads",
"the",
"source",
"directory",
"recursively",
"and",
"returns",
"relative",
"paths",
"to",
"all",
"files",
"and",
"empty",
"directories",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/fileutil.go#L78-L92
|
train
|
prometheus/tsdb
|
fileutil/fileutil.go
|
Replace
|
func Replace(from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = pdir.Sync(); err != nil {
pdir.Close()
return err
}
return pdir.Close()
}
|
go
|
func Replace(from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = pdir.Sync(); err != nil {
pdir.Close()
return err
}
return pdir.Close()
}
|
[
"func",
"Replace",
"(",
"from",
",",
"to",
"string",
")",
"error",
"{",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"to",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"Rename",
"(",
"from",
",",
"to",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Directory was renamed; sync parent dir to persist rename.",
"pdir",
",",
"err",
":=",
"OpenDir",
"(",
"filepath",
".",
"Dir",
"(",
"to",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
"=",
"pdir",
".",
"Sync",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"pdir",
".",
"Close",
"(",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"return",
"pdir",
".",
"Close",
"(",
")",
"\n",
"}"
] |
// Replace moves a file or directory to a new location and deletes any previous data.
// It is not atomic.
|
[
"Replace",
"moves",
"a",
"file",
"or",
"directory",
"to",
"a",
"new",
"location",
"and",
"deletes",
"any",
"previous",
"data",
".",
"It",
"is",
"not",
"atomic",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/fileutil.go#L130-L149
|
train
|
prometheus/tsdb
|
checkpoint.go
|
LastCheckpoint
|
func LastCheckpoint(dir string) (string, int, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return "", 0, err
}
// Traverse list backwards since there may be multiple checkpoints left.
for i := len(files) - 1; i >= 0; i-- {
fi := files[i]
if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
continue
}
if !fi.IsDir() {
return "", 0, errors.Errorf("checkpoint %s is not a directory", fi.Name())
}
idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
if err != nil {
continue
}
return filepath.Join(dir, fi.Name()), idx, nil
}
return "", 0, ErrNotFound
}
|
go
|
func LastCheckpoint(dir string) (string, int, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return "", 0, err
}
// Traverse list backwards since there may be multiple checkpoints left.
for i := len(files) - 1; i >= 0; i-- {
fi := files[i]
if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
continue
}
if !fi.IsDir() {
return "", 0, errors.Errorf("checkpoint %s is not a directory", fi.Name())
}
idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
if err != nil {
continue
}
return filepath.Join(dir, fi.Name()), idx, nil
}
return "", 0, ErrNotFound
}
|
[
"func",
"LastCheckpoint",
"(",
"dir",
"string",
")",
"(",
"string",
",",
"int",
",",
"error",
")",
"{",
"files",
",",
"err",
":=",
"ioutil",
".",
"ReadDir",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"// Traverse list backwards since there may be multiple checkpoints left.",
"for",
"i",
":=",
"len",
"(",
"files",
")",
"-",
"1",
";",
"i",
">=",
"0",
";",
"i",
"--",
"{",
"fi",
":=",
"files",
"[",
"i",
"]",
"\n\n",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"fi",
".",
"Name",
"(",
")",
",",
"checkpointPrefix",
")",
"{",
"continue",
"\n",
"}",
"\n",
"if",
"!",
"fi",
".",
"IsDir",
"(",
")",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"idx",
",",
"err",
":=",
"strconv",
".",
"Atoi",
"(",
"fi",
".",
"Name",
"(",
")",
"[",
"len",
"(",
"checkpointPrefix",
")",
":",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"continue",
"\n",
"}",
"\n",
"return",
"filepath",
".",
"Join",
"(",
"dir",
",",
"fi",
".",
"Name",
"(",
")",
")",
",",
"idx",
",",
"nil",
"\n",
"}",
"\n",
"return",
"\"",
"\"",
",",
"0",
",",
"ErrNotFound",
"\n",
"}"
] |
// LastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, ErrNotFound is returned.
|
[
"LastCheckpoint",
"returns",
"the",
"directory",
"name",
"and",
"index",
"of",
"the",
"most",
"recent",
"checkpoint",
".",
"If",
"dir",
"does",
"not",
"contain",
"any",
"checkpoints",
"ErrNotFound",
"is",
"returned",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/checkpoint.go#L45-L67
|
train
|
prometheus/tsdb
|
checkpoint.go
|
DeleteCheckpoints
|
func DeleteCheckpoints(dir string, maxIndex int) error {
var errs tsdb_errors.MultiError
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, fi := range files {
if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
continue
}
index, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
if err != nil || index >= maxIndex {
continue
}
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
errs.Add(err)
}
}
return errs.Err()
}
|
go
|
func DeleteCheckpoints(dir string, maxIndex int) error {
var errs tsdb_errors.MultiError
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, fi := range files {
if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
continue
}
index, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
if err != nil || index >= maxIndex {
continue
}
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
errs.Add(err)
}
}
return errs.Err()
}
|
[
"func",
"DeleteCheckpoints",
"(",
"dir",
"string",
",",
"maxIndex",
"int",
")",
"error",
"{",
"var",
"errs",
"tsdb_errors",
".",
"MultiError",
"\n\n",
"files",
",",
"err",
":=",
"ioutil",
".",
"ReadDir",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"for",
"_",
",",
"fi",
":=",
"range",
"files",
"{",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"fi",
".",
"Name",
"(",
")",
",",
"checkpointPrefix",
")",
"{",
"continue",
"\n",
"}",
"\n",
"index",
",",
"err",
":=",
"strconv",
".",
"Atoi",
"(",
"fi",
".",
"Name",
"(",
")",
"[",
"len",
"(",
"checkpointPrefix",
")",
":",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"||",
"index",
">=",
"maxIndex",
"{",
"continue",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"filepath",
".",
"Join",
"(",
"dir",
",",
"fi",
".",
"Name",
"(",
")",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"errs",
".",
"Add",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"errs",
".",
"Err",
"(",
")",
"\n",
"}"
] |
// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
|
[
"DeleteCheckpoints",
"deletes",
"all",
"checkpoints",
"in",
"a",
"directory",
"below",
"a",
"given",
"index",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/checkpoint.go#L70-L90
|
train
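
A combined sketch of LastCheckpoint and DeleteCheckpoints; the WAL directory is hypothetical and tsdb.ErrNotFound is assumed to be the exported sentinel returned in the record above.

package main

import (
	"log"

	"github.com/prometheus/tsdb"
)

func main() {
	walDir := "data/wal" // hypothetical WAL directory

	// Find the newest checkpoint, then drop every older one.
	dir, idx, err := tsdb.LastCheckpoint(walDir)
	if err == tsdb.ErrNotFound {
		log.Println("no checkpoint found")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("latest checkpoint %s at index %d", dir, idx)

	if err := tsdb.DeleteCheckpoints(walDir, idx); err != nil {
		log.Fatal(err)
	}
}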
|
prometheus/tsdb
|
wal/live_reader.go
|
NewLiveReader
|
func NewLiveReader(logger log.Logger, r io.Reader) *LiveReader {
return &LiveReader{
logger: logger,
rdr: r,
// Until we understand how they come about, make readers permissive
// to records spanning pages.
permissive: true,
}
}
|
go
|
func NewLiveReader(logger log.Logger, r io.Reader) *LiveReader {
return &LiveReader{
logger: logger,
rdr: r,
// Until we understand how they come about, make readers permissive
// to records spanning pages.
permissive: true,
}
}
|
[
"func",
"NewLiveReader",
"(",
"logger",
"log",
".",
"Logger",
",",
"r",
"io",
".",
"Reader",
")",
"*",
"LiveReader",
"{",
"return",
"&",
"LiveReader",
"{",
"logger",
":",
"logger",
",",
"rdr",
":",
"r",
",",
"// Until we understand how they come about, make readers permissive",
"// to records spanning pages.",
"permissive",
":",
"true",
",",
"}",
"\n",
"}"
] |
// NewLiveReader returns a new live reader.
|
[
"NewLiveReader",
"returns",
"a",
"new",
"live",
"reader",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/live_reader.go#L38-L47
|
train
|
prometheus/tsdb
|
wal/live_reader.go
|
Err
|
func (r *LiveReader) Err() error {
if r.eofNonErr && r.err == io.EOF {
return nil
}
return r.err
}
|
go
|
func (r *LiveReader) Err() error {
if r.eofNonErr && r.err == io.EOF {
return nil
}
return r.err
}
|
[
"func",
"(",
"r",
"*",
"LiveReader",
")",
"Err",
"(",
")",
"error",
"{",
"if",
"r",
".",
"eofNonErr",
"&&",
"r",
".",
"err",
"==",
"io",
".",
"EOF",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"return",
"r",
".",
"err",
"\n",
"}"
] |
// Err returns any errors encountered reading the WAL. io.EOFs are not terminal
// and Next can be tried again. Non-EOFs are terminal, and the reader should
// not be used again. It is up to the user to decide when to stop trying should
// io.EOF be returned.
|
[
"Err",
"returns",
"any",
"errors",
"encountered",
"reading",
"the",
"WAL",
".",
"io",
".",
"EOFs",
"are",
"not",
"terminal",
"and",
"Next",
"can",
"be",
"tried",
"again",
".",
"Non",
"-",
"EOFs",
"are",
"terminal",
"and",
"the",
"reader",
"should",
"not",
"be",
"used",
"again",
".",
"It",
"is",
"up",
"to",
"the",
"user",
"to",
"decide",
"when",
"to",
"stop",
"trying",
"should",
"io",
".",
"EOF",
"be",
"returned",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/live_reader.go#L77-L82
|
train
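
A hedged read-loop sketch for LiveReader; Next and Record are assumed methods of the wal package that are not shown in these records, and the segment path is hypothetical.

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	kitlog "github.com/go-kit/kit/log"
	"github.com/prometheus/tsdb/wal"
)

func main() {
	// Tail a single, hypothetical segment file.
	f, err := os.Open("data/wal/00000000")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := wal.NewLiveReader(kitlog.NewNopLogger(), f)

	// Next and Record are assumed here. Per the docstring above, io.EOF from
	// Err is not terminal and the caller may retry Next later.
	for r.Next() {
		fmt.Printf("read record of %d bytes\n", len(r.Record()))
	}
	if err := r.Err(); err != nil && err != io.EOF {
		log.Fatal(err)
	}
}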
|
prometheus/tsdb
|
wal/live_reader.go
|
buildRecord
|
func (r *LiveReader) buildRecord() (bool, error) {
for {
// Check that we have data in the internal buffer to read.
if r.writeIndex <= r.readIndex {
return false, nil
}
// Attempt to read a record, partial or otherwise.
temp, n, err := r.readRecord()
if err != nil {
return false, err
}
r.readIndex += n
r.total += int64(n)
if temp == nil {
return false, nil
}
rt := recType(r.hdr[0])
if rt == recFirst || rt == recFull {
r.rec = r.rec[:0]
}
r.rec = append(r.rec, temp...)
if err := validateRecord(rt, r.index); err != nil {
r.index = 0
return false, err
}
if rt == recLast || rt == recFull {
r.index = 0
return true, nil
}
// Only increment i for non-zero records since we use it
// to determine valid content record sequences.
r.index++
}
}
|
go
|
func (r *LiveReader) buildRecord() (bool, error) {
for {
// Check that we have data in the internal buffer to read.
if r.writeIndex <= r.readIndex {
return false, nil
}
// Attempt to read a record, partial or otherwise.
temp, n, err := r.readRecord()
if err != nil {
return false, err
}
r.readIndex += n
r.total += int64(n)
if temp == nil {
return false, nil
}
rt := recType(r.hdr[0])
if rt == recFirst || rt == recFull {
r.rec = r.rec[:0]
}
r.rec = append(r.rec, temp...)
if err := validateRecord(rt, r.index); err != nil {
r.index = 0
return false, err
}
if rt == recLast || rt == recFull {
r.index = 0
return true, nil
}
// Only increment i for non-zero records since we use it
// to determine valid content record sequences.
r.index++
}
}
|
[
"func",
"(",
"r",
"*",
"LiveReader",
")",
"buildRecord",
"(",
")",
"(",
"bool",
",",
"error",
")",
"{",
"for",
"{",
"// Check that we have data in the internal buffer to read.",
"if",
"r",
".",
"writeIndex",
"<=",
"r",
".",
"readIndex",
"{",
"return",
"false",
",",
"nil",
"\n",
"}",
"\n\n",
"// Attempt to read a record, partial or otherwise.",
"temp",
",",
"n",
",",
"err",
":=",
"r",
".",
"readRecord",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"false",
",",
"err",
"\n",
"}",
"\n\n",
"r",
".",
"readIndex",
"+=",
"n",
"\n",
"r",
".",
"total",
"+=",
"int64",
"(",
"n",
")",
"\n",
"if",
"temp",
"==",
"nil",
"{",
"return",
"false",
",",
"nil",
"\n",
"}",
"\n\n",
"rt",
":=",
"recType",
"(",
"r",
".",
"hdr",
"[",
"0",
"]",
")",
"\n",
"if",
"rt",
"==",
"recFirst",
"||",
"rt",
"==",
"recFull",
"{",
"r",
".",
"rec",
"=",
"r",
".",
"rec",
"[",
":",
"0",
"]",
"\n",
"}",
"\n",
"r",
".",
"rec",
"=",
"append",
"(",
"r",
".",
"rec",
",",
"temp",
"...",
")",
"\n\n",
"if",
"err",
":=",
"validateRecord",
"(",
"rt",
",",
"r",
".",
"index",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"index",
"=",
"0",
"\n",
"return",
"false",
",",
"err",
"\n",
"}",
"\n",
"if",
"rt",
"==",
"recLast",
"||",
"rt",
"==",
"recFull",
"{",
"r",
".",
"index",
"=",
"0",
"\n",
"return",
"true",
",",
"nil",
"\n",
"}",
"\n",
"// Only increment i for non-zero records since we use it",
"// to determine valid content record sequences.",
"r",
".",
"index",
"++",
"\n",
"}",
"\n",
"}"
] |
// Rebuild a full record from potentially partial records. Returns false
// if there was an error or if we weren't able to read a record for any reason.
// Returns true if we read a full record. Any record data is appended to
// LiveReader.rec
|
[
"Rebuild",
"a",
"full",
"record",
"from",
"potentially",
"partial",
"records",
".",
"Returns",
"false",
"if",
"there",
"was",
"an",
"error",
"or",
"if",
"we",
"weren",
"t",
"able",
"to",
"read",
"a",
"record",
"for",
"any",
"reason",
".",
"Returns",
"true",
"if",
"we",
"read",
"a",
"full",
"record",
".",
"Any",
"record",
"data",
"is",
"appended",
"to",
"LiveReader",
".",
"rec"
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/live_reader.go#L150-L187
|
train
|
prometheus/tsdb
|
errors/errors.go
|
Add
|
func (es *MultiError) Add(err error) {
if err == nil {
return
}
if merr, ok := err.(MultiError); ok {
*es = append(*es, merr...)
} else {
*es = append(*es, err)
}
}
|
go
|
func (es *MultiError) Add(err error) {
if err == nil {
return
}
if merr, ok := err.(MultiError); ok {
*es = append(*es, merr...)
} else {
*es = append(*es, err)
}
}
|
[
"func",
"(",
"es",
"*",
"MultiError",
")",
"Add",
"(",
"err",
"error",
")",
"{",
"if",
"err",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"if",
"merr",
",",
"ok",
":=",
"err",
".",
"(",
"MultiError",
")",
";",
"ok",
"{",
"*",
"es",
"=",
"append",
"(",
"*",
"es",
",",
"merr",
"...",
")",
"\n",
"}",
"else",
"{",
"*",
"es",
"=",
"append",
"(",
"*",
"es",
",",
"err",
")",
"\n",
"}",
"\n",
"}"
] |
// Add adds the error to the error list if it is not nil.
|
[
"Add",
"adds",
"the",
"error",
"to",
"the",
"error",
"list",
"if",
"it",
"is",
"not",
"nil",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/errors/errors.go#L45-L54
|
train
|
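Below is a minimal usage sketch for MultiError.Add from the entry above. The closeAll helper, the tsdb_errors import alias, and the assumption that the type lives at github.com/prometheus/tsdb/errors and exposes an Err() method returning nil for an empty list are illustrative, not taken from the entry.

package main

import (
	"fmt"
	"os"

	tsdb_errors "github.com/prometheus/tsdb/errors"
)

// closeAll is a hypothetical helper: it closes every file and collects all
// failures into one MultiError instead of stopping at the first error.
func closeAll(files []*os.File) error {
	var merr tsdb_errors.MultiError
	for _, f := range files {
		merr.Add(f.Close()) // Add ignores nil errors, as shown above.
	}
	return merr.Err() // assumed to return nil when no errors were added
}

func main() {
	f, err := os.CreateTemp("", "multierror-example")
	if err != nil {
		fmt.Println("create temp file:", err)
		return
	}
	if err := closeAll([]*os.File{f}); err != nil {
		fmt.Println("close errors:", err)
	}
}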
prometheus/tsdb
|
wal/wal.go
|
OpenWriteSegment
|
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
segName := SegmentName(dir, k)
f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
return nil, err
}
stat, err := f.Stat()
if err != nil {
f.Close()
return nil, err
}
// If the last page is torn, fill it with zeros.
// In case it was torn after all records were written successfully, this
// will just pad the page and everything will be fine.
// If it was torn mid-record, a full read (which the caller should do anyway
// to ensure integrity) will detect it as a corruption by the end.
if d := stat.Size() % pageSize; d != 0 {
level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName)
if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
f.Close()
return nil, errors.Wrap(err, "zero-pad torn page")
}
}
return &Segment{File: f, i: k, dir: dir}, nil
}
|
go
|
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
segName := SegmentName(dir, k)
f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
return nil, err
}
stat, err := f.Stat()
if err != nil {
f.Close()
return nil, err
}
// If the last page is torn, fill it with zeros.
// In case it was torn after all records were written successfully, this
// will just pad the page and everything will be fine.
// If it was torn mid-record, a full read (which the caller should do anyway
// to ensure integrity) will detect it as a corruption by the end.
if d := stat.Size() % pageSize; d != 0 {
level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName)
if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
f.Close()
return nil, errors.Wrap(err, "zero-pad torn page")
}
}
return &Segment{File: f, i: k, dir: dir}, nil
}
|
[
"func",
"OpenWriteSegment",
"(",
"logger",
"log",
".",
"Logger",
",",
"dir",
"string",
",",
"k",
"int",
")",
"(",
"*",
"Segment",
",",
"error",
")",
"{",
"segName",
":=",
"SegmentName",
"(",
"dir",
",",
"k",
")",
"\n",
"f",
",",
"err",
":=",
"os",
".",
"OpenFile",
"(",
"segName",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_APPEND",
",",
"0666",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"stat",
",",
"err",
":=",
"f",
".",
"Stat",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"f",
".",
"Close",
"(",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"// If the last page is torn, fill it with zeros.",
"// In case it was torn after all records were written successfully, this",
"// will just pad the page and everything will be fine.",
"// If it was torn mid-record, a full read (which the caller should do anyway",
"// to ensure integrity) will detect it as a corruption by the end.",
"if",
"d",
":=",
"stat",
".",
"Size",
"(",
")",
"%",
"pageSize",
";",
"d",
"!=",
"0",
"{",
"level",
".",
"Warn",
"(",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"segName",
")",
"\n",
"if",
"_",
",",
"err",
":=",
"f",
".",
"Write",
"(",
"make",
"(",
"[",
"]",
"byte",
",",
"pageSize",
"-",
"d",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"f",
".",
"Close",
"(",
")",
"\n",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"&",
"Segment",
"{",
"File",
":",
"f",
",",
"i",
":",
"k",
",",
"dir",
":",
"dir",
"}",
",",
"nil",
"\n",
"}"
] |
// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
|
[
"OpenWriteSegment",
"opens",
"segment",
"k",
"in",
"dir",
".",
"The",
"returned",
"segment",
"is",
"ready",
"for",
"new",
"appends",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L99-L123
|
train
|
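A hedged sketch of reopening an existing segment for appends with OpenWriteSegment as documented above. The directory path and segment index are made up, and it assumes Segment embeds *os.File so that Close is available.

package main

import (
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/tsdb/wal"
)

func main() {
	// Re-open segment 3 in a hypothetical WAL directory for appending.
	// A torn last page would be zero-padded as described above.
	seg, err := wal.OpenWriteSegment(log.NewNopLogger(), "data/wal", 3)
	if err != nil {
		fmt.Println("open write segment:", err)
		return
	}
	defer seg.Close() // Segment is assumed to embed *os.File, so Close is available.

	fmt.Println("segment ready for appends")
}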
prometheus/tsdb
|
wal/wal.go
|
CreateSegment
|
func CreateSegment(dir string, k int) (*Segment, error) {
f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return nil, err
}
return &Segment{File: f, i: k, dir: dir}, nil
}
|
go
|
func CreateSegment(dir string, k int) (*Segment, error) {
f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return nil, err
}
return &Segment{File: f, i: k, dir: dir}, nil
}
|
[
"func",
"CreateSegment",
"(",
"dir",
"string",
",",
"k",
"int",
")",
"(",
"*",
"Segment",
",",
"error",
")",
"{",
"f",
",",
"err",
":=",
"os",
".",
"OpenFile",
"(",
"SegmentName",
"(",
"dir",
",",
"k",
")",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_CREATE",
"|",
"os",
".",
"O_APPEND",
",",
"0666",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"&",
"Segment",
"{",
"File",
":",
"f",
",",
"i",
":",
"k",
",",
"dir",
":",
"dir",
"}",
",",
"nil",
"\n",
"}"
] |
// CreateSegment creates a new segment k in dir.
|
[
"CreateSegment",
"creates",
"a",
"new",
"segment",
"k",
"in",
"dir",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L126-L132
|
train
|
prometheus/tsdb
|
wal/wal.go
|
OpenReadSegment
|
func OpenReadSegment(fn string) (*Segment, error) {
k, err := strconv.Atoi(filepath.Base(fn))
if err != nil {
return nil, errors.New("not a valid filename")
}
f, err := os.Open(fn)
if err != nil {
return nil, err
}
return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil
}
|
go
|
func OpenReadSegment(fn string) (*Segment, error) {
k, err := strconv.Atoi(filepath.Base(fn))
if err != nil {
return nil, errors.New("not a valid filename")
}
f, err := os.Open(fn)
if err != nil {
return nil, err
}
return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil
}
|
[
"func",
"OpenReadSegment",
"(",
"fn",
"string",
")",
"(",
"*",
"Segment",
",",
"error",
")",
"{",
"k",
",",
"err",
":=",
"strconv",
".",
"Atoi",
"(",
"filepath",
".",
"Base",
"(",
"fn",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"f",
",",
"err",
":=",
"os",
".",
"Open",
"(",
"fn",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"&",
"Segment",
"{",
"File",
":",
"f",
",",
"i",
":",
"k",
",",
"dir",
":",
"filepath",
".",
"Dir",
"(",
"fn",
")",
"}",
",",
"nil",
"\n",
"}"
] |
// OpenReadSegment opens the segment with the given filename.
|
[
"OpenReadSegment",
"opens",
"the",
"segment",
"with",
"the",
"given",
"filename",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L135-L145
|
train
|
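A small sketch that opens one segment read-only and walks its records with the package reader, mirroring how Repair (later in this file) uses NewReader, Next, Record and Err. The path is made up, and treating the Segment as an io.Reader assumes it embeds *os.File.

package main

import (
	"bufio"
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	// Open segment 0 of a hypothetical WAL directory and walk its records.
	seg, err := wal.OpenReadSegment(wal.SegmentName("data/wal", 0))
	if err != nil {
		fmt.Println("open read segment:", err)
		return
	}
	defer seg.Close()

	r := wal.NewReader(bufio.NewReader(seg))
	n := 0
	for r.Next() {
		_ = r.Record() // a real caller would decode the record payload here
		n++
	}
	fmt.Println("records read:", n, "err:", r.Err())
}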
prometheus/tsdb
|
wal/wal.go
|
New
|
func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) {
return NewSize(logger, reg, dir, DefaultSegmentSize)
}
|
go
|
func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) {
return NewSize(logger, reg, dir, DefaultSegmentSize)
}
|
[
"func",
"New",
"(",
"logger",
"log",
".",
"Logger",
",",
"reg",
"prometheus",
".",
"Registerer",
",",
"dir",
"string",
")",
"(",
"*",
"WAL",
",",
"error",
")",
"{",
"return",
"NewSize",
"(",
"logger",
",",
"reg",
",",
"dir",
",",
"DefaultSegmentSize",
")",
"\n",
"}"
] |
// New returns a new WAL over the given directory.
|
[
"New",
"returns",
"a",
"new",
"WAL",
"over",
"the",
"given",
"directory",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L177-L179
|
train
|
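A minimal lifecycle sketch for New: open or create a WAL under a hypothetical directory with the default segment size and close it again. Passing nil for the logger and registerer relies on NewSize (next entry) substituting a nop logger and skipping metric registration.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	// Open (or create) a WAL under a hypothetical directory with the
	// default segment size. Passing nil for logger and registerer is safe:
	// NewSize substitutes a nop logger and skips registration.
	w, err := wal.New(nil, nil, "data/wal")
	if err != nil {
		fmt.Println("open wal:", err)
		return
	}
	defer w.Close()

	fmt.Println("wal ready")
}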
prometheus/tsdb
|
wal/wal.go
|
NewSize
|
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) {
if segmentSize%pageSize != 0 {
return nil, errors.New("invalid segment size")
}
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, errors.Wrap(err, "create dir")
}
if logger == nil {
logger = log.NewNopLogger()
}
w := &WAL{
dir: dir,
logger: logger,
segmentSize: segmentSize,
page: &page{},
actorc: make(chan func(), 100),
stopc: make(chan chan struct{}),
}
w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "prometheus_tsdb_wal_fsync_duration_seconds",
Help: "Duration of WAL fsync.",
})
w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_page_flushes_total",
Help: "Total number of page flushes.",
})
w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_completed_pages_total",
Help: "Total number of completed pages.",
})
w.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_truncations_failed_total",
Help: "Total number of WAL truncations that failed.",
})
w.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_truncations_total",
Help: "Total number of WAL truncations attempted.",
})
if reg != nil {
reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal)
}
_, j, err := w.Segments()
if err != nil {
return nil, errors.Wrap(err, "get segment range")
}
// Fresh dir, no segments yet.
if j == -1 {
segment, err := CreateSegment(w.dir, 0)
if err != nil {
return nil, err
}
if err := w.setSegment(segment); err != nil {
return nil, err
}
} else {
segment, err := OpenWriteSegment(logger, w.dir, j)
if err != nil {
return nil, err
}
if err := w.setSegment(segment); err != nil {
return nil, err
}
}
go w.run()
return w, nil
}
|
go
|
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) {
if segmentSize%pageSize != 0 {
return nil, errors.New("invalid segment size")
}
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, errors.Wrap(err, "create dir")
}
if logger == nil {
logger = log.NewNopLogger()
}
w := &WAL{
dir: dir,
logger: logger,
segmentSize: segmentSize,
page: &page{},
actorc: make(chan func(), 100),
stopc: make(chan chan struct{}),
}
w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "prometheus_tsdb_wal_fsync_duration_seconds",
Help: "Duration of WAL fsync.",
})
w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_page_flushes_total",
Help: "Total number of page flushes.",
})
w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_completed_pages_total",
Help: "Total number of completed pages.",
})
w.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_truncations_failed_total",
Help: "Total number of WAL truncations that failed.",
})
w.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_wal_truncations_total",
Help: "Total number of WAL truncations attempted.",
})
if reg != nil {
reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal)
}
_, j, err := w.Segments()
if err != nil {
return nil, errors.Wrap(err, "get segment range")
}
// Fresh dir, no segments yet.
if j == -1 {
segment, err := CreateSegment(w.dir, 0)
if err != nil {
return nil, err
}
if err := w.setSegment(segment); err != nil {
return nil, err
}
} else {
segment, err := OpenWriteSegment(logger, w.dir, j)
if err != nil {
return nil, err
}
if err := w.setSegment(segment); err != nil {
return nil, err
}
}
go w.run()
return w, nil
}
|
[
"func",
"NewSize",
"(",
"logger",
"log",
".",
"Logger",
",",
"reg",
"prometheus",
".",
"Registerer",
",",
"dir",
"string",
",",
"segmentSize",
"int",
")",
"(",
"*",
"WAL",
",",
"error",
")",
"{",
"if",
"segmentSize",
"%",
"pageSize",
"!=",
"0",
"{",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"dir",
",",
"0777",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"logger",
"==",
"nil",
"{",
"logger",
"=",
"log",
".",
"NewNopLogger",
"(",
")",
"\n",
"}",
"\n",
"w",
":=",
"&",
"WAL",
"{",
"dir",
":",
"dir",
",",
"logger",
":",
"logger",
",",
"segmentSize",
":",
"segmentSize",
",",
"page",
":",
"&",
"page",
"{",
"}",
",",
"actorc",
":",
"make",
"(",
"chan",
"func",
"(",
")",
",",
"100",
")",
",",
"stopc",
":",
"make",
"(",
"chan",
"chan",
"struct",
"{",
"}",
")",
",",
"}",
"\n",
"w",
".",
"fsyncDuration",
"=",
"prometheus",
".",
"NewSummary",
"(",
"prometheus",
".",
"SummaryOpts",
"{",
"Name",
":",
"\"",
"\"",
",",
"Help",
":",
"\"",
"\"",
",",
"}",
")",
"\n",
"w",
".",
"pageFlushes",
"=",
"prometheus",
".",
"NewCounter",
"(",
"prometheus",
".",
"CounterOpts",
"{",
"Name",
":",
"\"",
"\"",
",",
"Help",
":",
"\"",
"\"",
",",
"}",
")",
"\n",
"w",
".",
"pageCompletions",
"=",
"prometheus",
".",
"NewCounter",
"(",
"prometheus",
".",
"CounterOpts",
"{",
"Name",
":",
"\"",
"\"",
",",
"Help",
":",
"\"",
"\"",
",",
"}",
")",
"\n",
"w",
".",
"truncateFail",
"=",
"prometheus",
".",
"NewCounter",
"(",
"prometheus",
".",
"CounterOpts",
"{",
"Name",
":",
"\"",
"\"",
",",
"Help",
":",
"\"",
"\"",
",",
"}",
")",
"\n",
"w",
".",
"truncateTotal",
"=",
"prometheus",
".",
"NewCounter",
"(",
"prometheus",
".",
"CounterOpts",
"{",
"Name",
":",
"\"",
"\"",
",",
"Help",
":",
"\"",
"\"",
",",
"}",
")",
"\n",
"if",
"reg",
"!=",
"nil",
"{",
"reg",
".",
"MustRegister",
"(",
"w",
".",
"fsyncDuration",
",",
"w",
".",
"pageFlushes",
",",
"w",
".",
"pageCompletions",
",",
"w",
".",
"truncateFail",
",",
"w",
".",
"truncateTotal",
")",
"\n",
"}",
"\n\n",
"_",
",",
"j",
",",
"err",
":=",
"w",
".",
"Segments",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"// Fresh dir, no segments yet.",
"if",
"j",
"==",
"-",
"1",
"{",
"segment",
",",
"err",
":=",
"CreateSegment",
"(",
"w",
".",
"dir",
",",
"0",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"w",
".",
"setSegment",
"(",
"segment",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"else",
"{",
"segment",
",",
"err",
":=",
"OpenWriteSegment",
"(",
"logger",
",",
"w",
".",
"dir",
",",
"j",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"w",
".",
"setSegment",
"(",
"segment",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"go",
"w",
".",
"run",
"(",
")",
"\n\n",
"return",
"w",
",",
"nil",
"\n",
"}"
] |
// NewSize returns a new WAL over the given directory.
// New segments are created with the specified size.
|
[
"NewSize",
"returns",
"a",
"new",
"WAL",
"over",
"the",
"given",
"directory",
".",
"New",
"segments",
"are",
"created",
"with",
"the",
"specified",
"size",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L183-L252
|
train
|
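A sketch of NewSize with a custom segment size and a Prometheus registry. The size must be a multiple of the internal page size per the check above; the 32KiB page size is an assumption, so 4MiB is used as a safely divisible value, and the directory is made up.

package main

import (
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/tsdb/wal"
)

func main() {
	reg := prometheus.NewRegistry()

	// Create a WAL whose segments are 4MiB instead of the default.
	// NewSize rejects sizes that are not a multiple of the internal page
	// size (assumed here to be 32KiB), so 4MiB is a safe choice.
	w, err := wal.NewSize(log.NewNopLogger(), reg, "data/wal", 4*1024*1024)
	if err != nil {
		fmt.Println("open wal:", err)
		return
	}
	defer w.Close()

	fmt.Println("wal with 4MiB segments ready")
}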
prometheus/tsdb
|
wal/wal.go
|
Repair
|
func (w *WAL) Repair(origErr error) error {
// We could probably have a mode that only discards torn records right around
// the corruption to preserve as data much as possible.
// But that's not generally applicable if the records have any kind of causality.
// Maybe as an extra mode in the future if mid-WAL corruptions become
// a frequent concern.
err := errors.Cause(origErr) // So that we can pick up errors even if wrapped.
cerr, ok := err.(*CorruptionErr)
if !ok {
return errors.Wrap(origErr, "cannot handle error")
}
if cerr.Segment < 0 {
return errors.New("corruption error does not specify position")
}
level.Warn(w.logger).Log("msg", "starting corruption repair",
"segment", cerr.Segment, "offset", cerr.Offset)
// All segments behind the corruption can no longer be used.
segs, err := listSegments(w.dir)
if err != nil {
return errors.Wrap(err, "list segments")
}
level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment)
for _, s := range segs {
if w.segment.i == s.index {
// The active segment needs to be removed,
// close it first (Windows!). Can be closed safely
// as we set the current segment to repaired file
// below.
if err := w.segment.Close(); err != nil {
return errors.Wrap(err, "close active segment")
}
}
if s.index <= cerr.Segment {
continue
}
if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil {
return errors.Wrapf(err, "delete segment:%v", s.index)
}
}
// Regardless of the corruption offset, no record reaches into the previous segment.
// So we can safely repair the WAL by removing the segment and re-inserting all
// its records up to the corruption.
level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment)
fn := SegmentName(w.dir, cerr.Segment)
tmpfn := fn + ".repair"
if err := fileutil.Rename(fn, tmpfn); err != nil {
return err
}
// Create a clean segment and make it the active one.
s, err := CreateSegment(w.dir, cerr.Segment)
if err != nil {
return err
}
if err := w.setSegment(s); err != nil {
return err
}
f, err := os.Open(tmpfn)
if err != nil {
return errors.Wrap(err, "open segment")
}
defer f.Close()
r := NewReader(bufio.NewReader(f))
for r.Next() {
// Add records only up to the where the error was.
if r.Offset() >= cerr.Offset {
break
}
if err := w.Log(r.Record()); err != nil {
return errors.Wrap(err, "insert record")
}
}
// We expect an error here from r.Err(), so nothing to handle.
// We explicitly close even when there is a defer for Windows to be
// able to delete it. The defer is in place to close it in-case there
// are errors above.
if err := f.Close(); err != nil {
return errors.Wrap(err, "close corrupted file")
}
if err := os.Remove(tmpfn); err != nil {
return errors.Wrap(err, "delete corrupted segment")
}
return nil
}
|
go
|
func (w *WAL) Repair(origErr error) error {
// We could probably have a mode that only discards torn records right around
// the corruption to preserve as data much as possible.
// But that's not generally applicable if the records have any kind of causality.
// Maybe as an extra mode in the future if mid-WAL corruptions become
// a frequent concern.
err := errors.Cause(origErr) // So that we can pick up errors even if wrapped.
cerr, ok := err.(*CorruptionErr)
if !ok {
return errors.Wrap(origErr, "cannot handle error")
}
if cerr.Segment < 0 {
return errors.New("corruption error does not specify position")
}
level.Warn(w.logger).Log("msg", "starting corruption repair",
"segment", cerr.Segment, "offset", cerr.Offset)
// All segments behind the corruption can no longer be used.
segs, err := listSegments(w.dir)
if err != nil {
return errors.Wrap(err, "list segments")
}
level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment)
for _, s := range segs {
if w.segment.i == s.index {
// The active segment needs to be removed,
// close it first (Windows!). Can be closed safely
// as we set the current segment to repaired file
// below.
if err := w.segment.Close(); err != nil {
return errors.Wrap(err, "close active segment")
}
}
if s.index <= cerr.Segment {
continue
}
if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil {
return errors.Wrapf(err, "delete segment:%v", s.index)
}
}
// Regardless of the corruption offset, no record reaches into the previous segment.
// So we can safely repair the WAL by removing the segment and re-inserting all
// its records up to the corruption.
level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment)
fn := SegmentName(w.dir, cerr.Segment)
tmpfn := fn + ".repair"
if err := fileutil.Rename(fn, tmpfn); err != nil {
return err
}
// Create a clean segment and make it the active one.
s, err := CreateSegment(w.dir, cerr.Segment)
if err != nil {
return err
}
if err := w.setSegment(s); err != nil {
return err
}
f, err := os.Open(tmpfn)
if err != nil {
return errors.Wrap(err, "open segment")
}
defer f.Close()
r := NewReader(bufio.NewReader(f))
for r.Next() {
// Add records only up to the where the error was.
if r.Offset() >= cerr.Offset {
break
}
if err := w.Log(r.Record()); err != nil {
return errors.Wrap(err, "insert record")
}
}
// We expect an error here from r.Err(), so nothing to handle.
// We explicitly close even when there is a defer for Windows to be
// able to delete it. The defer is in place to close it in-case there
// are errors above.
if err := f.Close(); err != nil {
return errors.Wrap(err, "close corrupted file")
}
if err := os.Remove(tmpfn); err != nil {
return errors.Wrap(err, "delete corrupted segment")
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"Repair",
"(",
"origErr",
"error",
")",
"error",
"{",
"// We could probably have a mode that only discards torn records right around",
"// the corruption to preserve as data much as possible.",
"// But that's not generally applicable if the records have any kind of causality.",
"// Maybe as an extra mode in the future if mid-WAL corruptions become",
"// a frequent concern.",
"err",
":=",
"errors",
".",
"Cause",
"(",
"origErr",
")",
"// So that we can pick up errors even if wrapped.",
"\n\n",
"cerr",
",",
"ok",
":=",
"err",
".",
"(",
"*",
"CorruptionErr",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"origErr",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"cerr",
".",
"Segment",
"<",
"0",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"level",
".",
"Warn",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"cerr",
".",
"Segment",
",",
"\"",
"\"",
",",
"cerr",
".",
"Offset",
")",
"\n\n",
"// All segments behind the corruption can no longer be used.",
"segs",
",",
"err",
":=",
"listSegments",
"(",
"w",
".",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"level",
".",
"Warn",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"cerr",
".",
"Segment",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"segs",
"{",
"if",
"w",
".",
"segment",
".",
"i",
"==",
"s",
".",
"index",
"{",
"// The active segment needs to be removed,",
"// close it first (Windows!). Can be closed safely",
"// as we set the current segment to repaired file",
"// below.",
"if",
"err",
":=",
"w",
".",
"segment",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"s",
".",
"index",
"<=",
"cerr",
".",
"Segment",
"{",
"continue",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"Remove",
"(",
"filepath",
".",
"Join",
"(",
"w",
".",
"dir",
",",
"s",
".",
"name",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"s",
".",
"index",
")",
"\n",
"}",
"\n",
"}",
"\n",
"// Regardless of the corruption offset, no record reaches into the previous segment.",
"// So we can safely repair the WAL by removing the segment and re-inserting all",
"// its records up to the corruption.",
"level",
".",
"Warn",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"cerr",
".",
"Segment",
")",
"\n\n",
"fn",
":=",
"SegmentName",
"(",
"w",
".",
"dir",
",",
"cerr",
".",
"Segment",
")",
"\n",
"tmpfn",
":=",
"fn",
"+",
"\"",
"\"",
"\n\n",
"if",
"err",
":=",
"fileutil",
".",
"Rename",
"(",
"fn",
",",
"tmpfn",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// Create a clean segment and make it the active one.",
"s",
",",
"err",
":=",
"CreateSegment",
"(",
"w",
".",
"dir",
",",
"cerr",
".",
"Segment",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"w",
".",
"setSegment",
"(",
"s",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"f",
",",
"err",
":=",
"os",
".",
"Open",
"(",
"tmpfn",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"defer",
"f",
".",
"Close",
"(",
")",
"\n\n",
"r",
":=",
"NewReader",
"(",
"bufio",
".",
"NewReader",
"(",
"f",
")",
")",
"\n\n",
"for",
"r",
".",
"Next",
"(",
")",
"{",
"// Add records only up to the where the error was.",
"if",
"r",
".",
"Offset",
"(",
")",
">=",
"cerr",
".",
"Offset",
"{",
"break",
"\n",
"}",
"\n",
"if",
"err",
":=",
"w",
".",
"Log",
"(",
"r",
".",
"Record",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"\n",
"// We expect an error here from r.Err(), so nothing to handle.",
"// We explicitly close even when there is a defer for Windows to be",
"// able to delete it. The defer is in place to close it in-case there",
"// are errors above.",
"if",
"err",
":=",
"f",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"os",
".",
"Remove",
"(",
"tmpfn",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Repair attempts to repair the WAL based on the error.
// It discards all data after the corruption.
|
[
"Repair",
"attempts",
"to",
"repair",
"the",
"WAL",
"based",
"on",
"the",
"error",
".",
"It",
"discards",
"all",
"data",
"after",
"the",
"corruption",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L279-L371
|
train
|
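A hedged sketch of how Repair might be driven: replay the whole WAL and, if the read fails, hand the error to Repair. Per the code above, Repair only accepts an error that (possibly wrapped) is a *CorruptionErr, so this assumes the reader surfaces corruption in that form; the directory is made up.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	w, err := wal.New(nil, nil, "data/wal")
	if err != nil {
		fmt.Println("open wal:", err)
		return
	}
	defer w.Close()

	// Hypothetical replay pass over every segment in the directory.
	sr, err := wal.NewSegmentsRangeReader(wal.SegmentRange{Dir: "data/wal", First: -1, Last: -1})
	if err != nil {
		fmt.Println("open segments:", err)
		return
	}
	r := wal.NewReader(sr)
	for r.Next() {
		// A real caller would decode and apply r.Record() here.
	}
	sr.Close()

	// Repair only understands errors that (possibly wrapped) are a
	// *CorruptionErr carrying the segment and offset; anything else is
	// rejected with "cannot handle error", as shown above.
	if rerr := r.Err(); rerr != nil {
		if err := w.Repair(rerr); err != nil {
			fmt.Println("repair failed:", err)
			return
		}
		fmt.Println("wal repaired, data after the corruption discarded")
	}
}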
prometheus/tsdb
|
wal/wal.go
|
SegmentName
|
func SegmentName(dir string, i int) string {
return filepath.Join(dir, fmt.Sprintf("%08d", i))
}
|
go
|
func SegmentName(dir string, i int) string {
return filepath.Join(dir, fmt.Sprintf("%08d", i))
}
|
[
"func",
"SegmentName",
"(",
"dir",
"string",
",",
"i",
"int",
")",
"string",
"{",
"return",
"filepath",
".",
"Join",
"(",
"dir",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"i",
")",
")",
"\n",
"}"
] |
// SegmentName builds a segment name for the directory.
|
[
"SegmentName",
"builds",
"a",
"segment",
"name",
"for",
"the",
"directory",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L374-L376
|
train
|
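A one-line illustration of SegmentName: the name is the zero-padded, eight-digit segment index joined to the directory, so the call below prints data/wal/00000042 (directory made up).

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	// Segment names are the zero-padded, eight-digit segment index inside
	// the WAL directory.
	fmt.Println(wal.SegmentName("data/wal", 42))
}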
prometheus/tsdb
|
wal/wal.go
|
nextSegment
|
func (w *WAL) nextSegment() error {
// Only flush the current page if it actually holds data.
if w.page.alloc > 0 {
if err := w.flushPage(true); err != nil {
return err
}
}
next, err := CreateSegment(w.dir, w.segment.Index()+1)
if err != nil {
return errors.Wrap(err, "create new segment file")
}
prev := w.segment
if err := w.setSegment(next); err != nil {
return err
}
// Don't block further writes by fsyncing the last segment.
w.actorc <- func() {
if err := w.fsync(prev); err != nil {
level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
}
if err := prev.Close(); err != nil {
level.Error(w.logger).Log("msg", "close previous segment", "err", err)
}
}
return nil
}
|
go
|
func (w *WAL) nextSegment() error {
// Only flush the current page if it actually holds data.
if w.page.alloc > 0 {
if err := w.flushPage(true); err != nil {
return err
}
}
next, err := CreateSegment(w.dir, w.segment.Index()+1)
if err != nil {
return errors.Wrap(err, "create new segment file")
}
prev := w.segment
if err := w.setSegment(next); err != nil {
return err
}
// Don't block further writes by fsyncing the last segment.
w.actorc <- func() {
if err := w.fsync(prev); err != nil {
level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
}
if err := prev.Close(); err != nil {
level.Error(w.logger).Log("msg", "close previous segment", "err", err)
}
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"nextSegment",
"(",
")",
"error",
"{",
"// Only flush the current page if it actually holds data.",
"if",
"w",
".",
"page",
".",
"alloc",
">",
"0",
"{",
"if",
"err",
":=",
"w",
".",
"flushPage",
"(",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"next",
",",
"err",
":=",
"CreateSegment",
"(",
"w",
".",
"dir",
",",
"w",
".",
"segment",
".",
"Index",
"(",
")",
"+",
"1",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"prev",
":=",
"w",
".",
"segment",
"\n",
"if",
"err",
":=",
"w",
".",
"setSegment",
"(",
"next",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Don't block further writes by fsyncing the last segment.",
"w",
".",
"actorc",
"<-",
"func",
"(",
")",
"{",
"if",
"err",
":=",
"w",
".",
"fsync",
"(",
"prev",
")",
";",
"err",
"!=",
"nil",
"{",
"level",
".",
"Error",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"prev",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"level",
".",
"Error",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// nextSegment creates the next segment and closes the previous one.
|
[
"nextSegment",
"creates",
"the",
"next",
"segment",
"and",
"closes",
"the",
"previous",
"one",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L379-L405
|
train
|
prometheus/tsdb
|
wal/wal.go
|
flushPage
|
func (w *WAL) flushPage(clear bool) error {
w.pageFlushes.Inc()
p := w.page
clear = clear || p.full()
// No more data will fit into the page. Enqueue and clear it.
if clear {
p.alloc = pageSize // Write till end of page.
w.pageCompletions.Inc()
}
n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
if err != nil {
return err
}
p.flushed += n
// We flushed an entire page, prepare a new one.
if clear {
for i := range p.buf {
p.buf[i] = 0
}
p.alloc = 0
p.flushed = 0
w.donePages++
}
return nil
}
|
go
|
func (w *WAL) flushPage(clear bool) error {
w.pageFlushes.Inc()
p := w.page
clear = clear || p.full()
// No more data will fit into the page. Enqueue and clear it.
if clear {
p.alloc = pageSize // Write till end of page.
w.pageCompletions.Inc()
}
n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
if err != nil {
return err
}
p.flushed += n
// We flushed an entire page, prepare a new one.
if clear {
for i := range p.buf {
p.buf[i] = 0
}
p.alloc = 0
p.flushed = 0
w.donePages++
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"flushPage",
"(",
"clear",
"bool",
")",
"error",
"{",
"w",
".",
"pageFlushes",
".",
"Inc",
"(",
")",
"\n\n",
"p",
":=",
"w",
".",
"page",
"\n",
"clear",
"=",
"clear",
"||",
"p",
".",
"full",
"(",
")",
"\n\n",
"// No more data will fit into the page. Enqueue and clear it.",
"if",
"clear",
"{",
"p",
".",
"alloc",
"=",
"pageSize",
"// Write till end of page.",
"\n",
"w",
".",
"pageCompletions",
".",
"Inc",
"(",
")",
"\n",
"}",
"\n",
"n",
",",
"err",
":=",
"w",
".",
"segment",
".",
"Write",
"(",
"p",
".",
"buf",
"[",
"p",
".",
"flushed",
":",
"p",
".",
"alloc",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"p",
".",
"flushed",
"+=",
"n",
"\n\n",
"// We flushed an entire page, prepare a new one.",
"if",
"clear",
"{",
"for",
"i",
":=",
"range",
"p",
".",
"buf",
"{",
"p",
".",
"buf",
"[",
"i",
"]",
"=",
"0",
"\n",
"}",
"\n",
"p",
".",
"alloc",
"=",
"0",
"\n",
"p",
".",
"flushed",
"=",
"0",
"\n",
"w",
".",
"donePages",
"++",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// flushPage writes the new contents of the page to disk. If no more records will fit into
// the page, the remaining bytes will be set to zero and a new page will be started.
// If clear is true, this is enforced regardless of how many bytes are left in the page.
|
[
"flushPage",
"writes",
"the",
"new",
"contents",
"of",
"the",
"page",
"to",
"disk",
".",
"If",
"no",
"more",
"records",
"will",
"fit",
"into",
"the",
"page",
"the",
"remaining",
"bytes",
"will",
"be",
"set",
"to",
"zero",
"and",
"a",
"new",
"page",
"will",
"be",
"started",
".",
"If",
"clear",
"is",
"true",
"this",
"is",
"enforced",
"regardless",
"of",
"how",
"many",
"bytes",
"are",
"left",
"in",
"the",
"page",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L423-L450
|
train
|
prometheus/tsdb
|
wal/wal.go
|
Log
|
func (w *WAL) Log(recs ...[]byte) error {
w.mtx.Lock()
defer w.mtx.Unlock()
// Callers could just implement their own list record format but adding
// a bit of extra logic here frees them from that overhead.
for i, r := range recs {
if err := w.log(r, i == len(recs)-1); err != nil {
return err
}
}
return nil
}
|
go
|
func (w *WAL) Log(recs ...[]byte) error {
w.mtx.Lock()
defer w.mtx.Unlock()
// Callers could just implement their own list record format but adding
// a bit of extra logic here frees them from that overhead.
for i, r := range recs {
if err := w.log(r, i == len(recs)-1); err != nil {
return err
}
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"Log",
"(",
"recs",
"...",
"[",
"]",
"byte",
")",
"error",
"{",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n",
"// Callers could just implement their own list record format but adding",
"// a bit of extra logic here frees them from that overhead.",
"for",
"i",
",",
"r",
":=",
"range",
"recs",
"{",
"if",
"err",
":=",
"w",
".",
"log",
"(",
"r",
",",
"i",
"==",
"len",
"(",
"recs",
")",
"-",
"1",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Log writes the records into the log.
// Multiple records can be passed at once to reduce writes and increase throughput.
|
[
"Log",
"writes",
"the",
"records",
"into",
"the",
"log",
".",
"Multiple",
"records",
"can",
"be",
"passed",
"at",
"once",
"to",
"reduce",
"writes",
"and",
"increase",
"throughput",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L485-L496
|
train
|
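A sketch of batching several records into one Log call, as the docstring above suggests for throughput. The record payloads are opaque placeholder bytes and the directory is made up.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	w, err := wal.New(nil, nil, "data/wal")
	if err != nil {
		fmt.Println("open wal:", err)
		return
	}
	defer w.Close()

	// Passing several records in one Log call lets them share page flushes
	// where possible, as described above.
	recs := [][]byte{
		[]byte("record-1"),
		[]byte("record-2"),
		[]byte("record-3"),
	}
	if err := w.Log(recs...); err != nil {
		fmt.Println("log records:", err)
	}
}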
prometheus/tsdb
|
wal/wal.go
|
log
|
func (w *WAL) log(rec []byte, final bool) error {
// If the record is too big to fit within the active page in the current
// segment, terminate the active segment and advance to the next one.
// This ensures that records do not cross segment boundaries.
left := w.page.remaining() - recordHeaderSize // Free space in the active page.
left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
if len(rec) > left {
if err := w.nextSegment(); err != nil {
return err
}
}
// Populate as many pages as necessary to fit the record.
// Be careful to always do one pass to ensure we write zero-length records.
for i := 0; i == 0 || len(rec) > 0; i++ {
p := w.page
// Find how much of the record we can fit into the page.
var (
l = min(len(rec), (pageSize-p.alloc)-recordHeaderSize)
part = rec[:l]
buf = p.buf[p.alloc:]
typ recType
)
switch {
case i == 0 && len(part) == len(rec):
typ = recFull
case len(part) == len(rec):
typ = recLast
case i == 0:
typ = recFirst
default:
typ = recMiddle
}
buf[0] = byte(typ)
crc := crc32.Checksum(part, castagnoliTable)
binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
binary.BigEndian.PutUint32(buf[3:], crc)
copy(buf[recordHeaderSize:], part)
p.alloc += len(part) + recordHeaderSize
// By definition when a record is split it means its size is bigger than
// the page boundary so the current page would be full and needs to be flushed.
// On contrary if we wrote a full record, we can fit more records of the batch
// into the page before flushing it.
if final || typ != recFull || w.page.full() {
if err := w.flushPage(false); err != nil {
return err
}
}
rec = rec[l:]
}
return nil
}
|
go
|
func (w *WAL) log(rec []byte, final bool) error {
// If the record is too big to fit within the active page in the current
// segment, terminate the active segment and advance to the next one.
// This ensures that records do not cross segment boundaries.
left := w.page.remaining() - recordHeaderSize // Free space in the active page.
left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
if len(rec) > left {
if err := w.nextSegment(); err != nil {
return err
}
}
// Populate as many pages as necessary to fit the record.
// Be careful to always do one pass to ensure we write zero-length records.
for i := 0; i == 0 || len(rec) > 0; i++ {
p := w.page
// Find how much of the record we can fit into the page.
var (
l = min(len(rec), (pageSize-p.alloc)-recordHeaderSize)
part = rec[:l]
buf = p.buf[p.alloc:]
typ recType
)
switch {
case i == 0 && len(part) == len(rec):
typ = recFull
case len(part) == len(rec):
typ = recLast
case i == 0:
typ = recFirst
default:
typ = recMiddle
}
buf[0] = byte(typ)
crc := crc32.Checksum(part, castagnoliTable)
binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
binary.BigEndian.PutUint32(buf[3:], crc)
copy(buf[recordHeaderSize:], part)
p.alloc += len(part) + recordHeaderSize
// By definition when a record is split it means its size is bigger than
// the page boundary so the current page would be full and needs to be flushed.
// On contrary if we wrote a full record, we can fit more records of the batch
// into the page before flushing it.
if final || typ != recFull || w.page.full() {
if err := w.flushPage(false); err != nil {
return err
}
}
rec = rec[l:]
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"log",
"(",
"rec",
"[",
"]",
"byte",
",",
"final",
"bool",
")",
"error",
"{",
"// If the record is too big to fit within the active page in the current",
"// segment, terminate the active segment and advance to the next one.",
"// This ensures that records do not cross segment boundaries.",
"left",
":=",
"w",
".",
"page",
".",
"remaining",
"(",
")",
"-",
"recordHeaderSize",
"// Free space in the active page.",
"\n",
"left",
"+=",
"(",
"pageSize",
"-",
"recordHeaderSize",
")",
"*",
"(",
"w",
".",
"pagesPerSegment",
"(",
")",
"-",
"w",
".",
"donePages",
"-",
"1",
")",
"// Free pages in the active segment.",
"\n\n",
"if",
"len",
"(",
"rec",
")",
">",
"left",
"{",
"if",
"err",
":=",
"w",
".",
"nextSegment",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Populate as many pages as necessary to fit the record.",
"// Be careful to always do one pass to ensure we write zero-length records.",
"for",
"i",
":=",
"0",
";",
"i",
"==",
"0",
"||",
"len",
"(",
"rec",
")",
">",
"0",
";",
"i",
"++",
"{",
"p",
":=",
"w",
".",
"page",
"\n\n",
"// Find how much of the record we can fit into the page.",
"var",
"(",
"l",
"=",
"min",
"(",
"len",
"(",
"rec",
")",
",",
"(",
"pageSize",
"-",
"p",
".",
"alloc",
")",
"-",
"recordHeaderSize",
")",
"\n",
"part",
"=",
"rec",
"[",
":",
"l",
"]",
"\n",
"buf",
"=",
"p",
".",
"buf",
"[",
"p",
".",
"alloc",
":",
"]",
"\n",
"typ",
"recType",
"\n",
")",
"\n\n",
"switch",
"{",
"case",
"i",
"==",
"0",
"&&",
"len",
"(",
"part",
")",
"==",
"len",
"(",
"rec",
")",
":",
"typ",
"=",
"recFull",
"\n",
"case",
"len",
"(",
"part",
")",
"==",
"len",
"(",
"rec",
")",
":",
"typ",
"=",
"recLast",
"\n",
"case",
"i",
"==",
"0",
":",
"typ",
"=",
"recFirst",
"\n",
"default",
":",
"typ",
"=",
"recMiddle",
"\n",
"}",
"\n\n",
"buf",
"[",
"0",
"]",
"=",
"byte",
"(",
"typ",
")",
"\n",
"crc",
":=",
"crc32",
".",
"Checksum",
"(",
"part",
",",
"castagnoliTable",
")",
"\n",
"binary",
".",
"BigEndian",
".",
"PutUint16",
"(",
"buf",
"[",
"1",
":",
"]",
",",
"uint16",
"(",
"len",
"(",
"part",
")",
")",
")",
"\n",
"binary",
".",
"BigEndian",
".",
"PutUint32",
"(",
"buf",
"[",
"3",
":",
"]",
",",
"crc",
")",
"\n\n",
"copy",
"(",
"buf",
"[",
"recordHeaderSize",
":",
"]",
",",
"part",
")",
"\n",
"p",
".",
"alloc",
"+=",
"len",
"(",
"part",
")",
"+",
"recordHeaderSize",
"\n\n",
"// By definition when a record is split it means its size is bigger than",
"// the page boundary so the current page would be full and needs to be flushed.",
"// On contrary if we wrote a full record, we can fit more records of the batch",
"// into the page before flushing it.",
"if",
"final",
"||",
"typ",
"!=",
"recFull",
"||",
"w",
".",
"page",
".",
"full",
"(",
")",
"{",
"if",
"err",
":=",
"w",
".",
"flushPage",
"(",
"false",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"rec",
"=",
"rec",
"[",
"l",
":",
"]",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// log writes rec to the log and forces a flush of the current page if it's
// the final record of a batch, if the record is bigger than the page size, or
// if the current page is full.
|
[
"log",
"writes",
"rec",
"to",
"the",
"log",
"and",
"forces",
"a",
"flush",
"of",
"the",
"current",
"page",
"if",
"its",
"the",
"final",
"record",
"of",
"a",
"batch",
"the",
"record",
"is",
"bigger",
"than",
"the",
"page",
"size",
"or",
"the",
"current",
"page",
"is",
"full",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L501-L558
|
train
|
prometheus/tsdb
|
wal/wal.go
|
Truncate
|
func (w *WAL) Truncate(i int) (err error) {
w.truncateTotal.Inc()
defer func() {
if err != nil {
w.truncateFail.Inc()
}
}()
refs, err := listSegments(w.dir)
if err != nil {
return err
}
for _, r := range refs {
if r.index >= i {
break
}
if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil {
return err
}
}
return nil
}
|
go
|
func (w *WAL) Truncate(i int) (err error) {
w.truncateTotal.Inc()
defer func() {
if err != nil {
w.truncateFail.Inc()
}
}()
refs, err := listSegments(w.dir)
if err != nil {
return err
}
for _, r := range refs {
if r.index >= i {
break
}
if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil {
return err
}
}
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"Truncate",
"(",
"i",
"int",
")",
"(",
"err",
"error",
")",
"{",
"w",
".",
"truncateTotal",
".",
"Inc",
"(",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"if",
"err",
"!=",
"nil",
"{",
"w",
".",
"truncateFail",
".",
"Inc",
"(",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"refs",
",",
"err",
":=",
"listSegments",
"(",
"w",
".",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"for",
"_",
",",
"r",
":=",
"range",
"refs",
"{",
"if",
"r",
".",
"index",
">=",
"i",
"{",
"break",
"\n",
"}",
"\n",
"if",
"err",
"=",
"os",
".",
"Remove",
"(",
"filepath",
".",
"Join",
"(",
"w",
".",
"dir",
",",
"r",
".",
"name",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Truncate drops all segments before i.
|
[
"Truncate",
"drops",
"all",
"segments",
"before",
"i",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L574-L594
|
train
|
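A sketch of dropping old segments with Truncate once their contents are no longer needed (for example after a checkpoint). The cutoff index and directory are made up.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	w, err := wal.New(nil, nil, "data/wal")
	if err != nil {
		fmt.Println("open wal:", err)
		return
	}
	defer w.Close()

	// Drop every segment with an index below 5. Segment 5 and anything
	// newer (including the active segment) are kept.
	if err := w.Truncate(5); err != nil {
		fmt.Println("truncate wal:", err)
	}
}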
prometheus/tsdb
|
wal/wal.go
|
Close
|
func (w *WAL) Close() (err error) {
w.mtx.Lock()
defer w.mtx.Unlock()
if w.closed {
return errors.New("wal already closed")
}
// Flush the last page and zero out all its remaining size.
// We must not flush an empty page as it would falsely signal
// the segment is done if we start writing to it again after opening.
if w.page.alloc > 0 {
if err := w.flushPage(true); err != nil {
return err
}
}
donec := make(chan struct{})
w.stopc <- donec
<-donec
if err = w.fsync(w.segment); err != nil {
level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
}
if err := w.segment.Close(); err != nil {
level.Error(w.logger).Log("msg", "close previous segment", "err", err)
}
w.closed = true
return nil
}
|
go
|
func (w *WAL) Close() (err error) {
w.mtx.Lock()
defer w.mtx.Unlock()
if w.closed {
return errors.New("wal already closed")
}
// Flush the last page and zero out all its remaining size.
// We must not flush an empty page as it would falsely signal
// the segment is done if we start writing to it again after opening.
if w.page.alloc > 0 {
if err := w.flushPage(true); err != nil {
return err
}
}
donec := make(chan struct{})
w.stopc <- donec
<-donec
if err = w.fsync(w.segment); err != nil {
level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
}
if err := w.segment.Close(); err != nil {
level.Error(w.logger).Log("msg", "close previous segment", "err", err)
}
w.closed = true
return nil
}
|
[
"func",
"(",
"w",
"*",
"WAL",
")",
"Close",
"(",
")",
"(",
"err",
"error",
")",
"{",
"w",
".",
"mtx",
".",
"Lock",
"(",
")",
"\n",
"defer",
"w",
".",
"mtx",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"w",
".",
"closed",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"// Flush the last page and zero out all its remaining size.",
"// We must not flush an empty page as it would falsely signal",
"// the segment is done if we start writing to it again after opening.",
"if",
"w",
".",
"page",
".",
"alloc",
">",
"0",
"{",
"if",
"err",
":=",
"w",
".",
"flushPage",
"(",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"donec",
":=",
"make",
"(",
"chan",
"struct",
"{",
"}",
")",
"\n",
"w",
".",
"stopc",
"<-",
"donec",
"\n",
"<-",
"donec",
"\n\n",
"if",
"err",
"=",
"w",
".",
"fsync",
"(",
"w",
".",
"segment",
")",
";",
"err",
"!=",
"nil",
"{",
"level",
".",
"Error",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"w",
".",
"segment",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"level",
".",
"Error",
"(",
"w",
".",
"logger",
")",
".",
"Log",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"w",
".",
"closed",
"=",
"true",
"\n",
"return",
"nil",
"\n",
"}"
] |
// Close flushes all writes and closes the active segment.
|
[
"Close",
"flushes",
"all",
"writes",
"and",
"closes",
"active",
"segment",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L604-L633
|
train
|
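A sketch of the Close behaviour documented above: the first Close flushes and fsyncs the active segment, and a second Close is assumed to return the "wal already closed" error seen in the code. Directory and record contents are made up.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	w, err := wal.New(nil, nil, "data/wal")
	if err != nil {
		fmt.Println("open wal:", err)
		return
	}

	_ = w.Log([]byte("final record"))

	// Close flushes the partially filled last page and fsyncs the active
	// segment; calling it a second time returns an error, as shown above.
	if err := w.Close(); err != nil {
		fmt.Println("close wal:", err)
	}
	if err := w.Close(); err != nil {
		fmt.Println("second close:", err) // "wal already closed"
	}
}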
prometheus/tsdb
|
wal/wal.go
|
NewSegmentsRangeReader
|
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
var segs []*Segment
for _, sgmRange := range sr {
refs, err := listSegments(sgmRange.Dir)
if err != nil {
return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir)
}
for _, r := range refs {
if sgmRange.First >= 0 && r.index < sgmRange.First {
continue
}
if sgmRange.Last >= 0 && r.index > sgmRange.Last {
break
}
s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
if err != nil {
return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir)
}
segs = append(segs, s)
}
}
return newSegmentBufReader(segs...), nil
}
|
go
|
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
var segs []*Segment
for _, sgmRange := range sr {
refs, err := listSegments(sgmRange.Dir)
if err != nil {
return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir)
}
for _, r := range refs {
if sgmRange.First >= 0 && r.index < sgmRange.First {
continue
}
if sgmRange.Last >= 0 && r.index > sgmRange.Last {
break
}
s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
if err != nil {
return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir)
}
segs = append(segs, s)
}
}
return newSegmentBufReader(segs...), nil
}
|
[
"func",
"NewSegmentsRangeReader",
"(",
"sr",
"...",
"SegmentRange",
")",
"(",
"io",
".",
"ReadCloser",
",",
"error",
")",
"{",
"var",
"segs",
"[",
"]",
"*",
"Segment",
"\n\n",
"for",
"_",
",",
"sgmRange",
":=",
"range",
"sr",
"{",
"refs",
",",
"err",
":=",
"listSegments",
"(",
"sgmRange",
".",
"Dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"sgmRange",
".",
"Dir",
")",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"r",
":=",
"range",
"refs",
"{",
"if",
"sgmRange",
".",
"First",
">=",
"0",
"&&",
"r",
".",
"index",
"<",
"sgmRange",
".",
"First",
"{",
"continue",
"\n",
"}",
"\n",
"if",
"sgmRange",
".",
"Last",
">=",
"0",
"&&",
"r",
".",
"index",
">",
"sgmRange",
".",
"Last",
"{",
"break",
"\n",
"}",
"\n",
"s",
",",
"err",
":=",
"OpenReadSegment",
"(",
"filepath",
".",
"Join",
"(",
"sgmRange",
".",
"Dir",
",",
"r",
".",
"name",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrapf",
"(",
"err",
",",
"\"",
"\"",
",",
"r",
".",
"name",
",",
"sgmRange",
".",
"Dir",
")",
"\n",
"}",
"\n",
"segs",
"=",
"append",
"(",
"segs",
",",
"s",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"newSegmentBufReader",
"(",
"segs",
"...",
")",
",",
"nil",
"\n",
"}"
] |
// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges.
// If first or last are -1, the range is open on the respective end.
|
[
"NewSegmentsRangeReader",
"returns",
"a",
"new",
"reader",
"over",
"the",
"given",
"WAL",
"segment",
"ranges",
".",
"If",
"first",
"or",
"last",
"are",
"-",
"1",
"the",
"range",
"is",
"open",
"on",
"the",
"respective",
"end",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L676-L700
|
train
|
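A sketch that reads only a bounded range of segments via NewSegmentsRangeReader; a -1 bound leaves that side open, per the docstring above. The directory and indices are made up.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/wal"
)

func main() {
	// Read only segments 10 through 20 of a hypothetical WAL directory.
	sr, err := wal.NewSegmentsRangeReader(wal.SegmentRange{
		Dir:   "data/wal",
		First: 10,
		Last:  20,
	})
	if err != nil {
		fmt.Println("open segment range:", err)
		return
	}
	defer sr.Close()

	r := wal.NewReader(sr)
	n := 0
	for r.Next() {
		n++
	}
	fmt.Println("records in range:", n, "err:", r.Err())
}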
prometheus/tsdb
|
index/index.go
|
NewTOCFromByteSlice
|
func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
if bs.Len() < indexTOCLen {
return nil, encoding.ErrInvalidSize
}
b := bs.Range(bs.Len()-indexTOCLen, bs.Len())
expCRC := binary.BigEndian.Uint32(b[len(b)-4:])
d := encoding.Decbuf{B: b[:len(b)-4]}
if d.Crc32(castagnoliTable) != expCRC {
return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC")
}
if err := d.Err(); err != nil {
return nil, err
}
return &TOC{
Symbols: d.Be64(),
Series: d.Be64(),
LabelIndices: d.Be64(),
LabelIndicesTable: d.Be64(),
Postings: d.Be64(),
PostingsTable: d.Be64(),
}, nil
}
|
go
|
func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
if bs.Len() < indexTOCLen {
return nil, encoding.ErrInvalidSize
}
b := bs.Range(bs.Len()-indexTOCLen, bs.Len())
expCRC := binary.BigEndian.Uint32(b[len(b)-4:])
d := encoding.Decbuf{B: b[:len(b)-4]}
if d.Crc32(castagnoliTable) != expCRC {
return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC")
}
if err := d.Err(); err != nil {
return nil, err
}
return &TOC{
Symbols: d.Be64(),
Series: d.Be64(),
LabelIndices: d.Be64(),
LabelIndicesTable: d.Be64(),
Postings: d.Be64(),
PostingsTable: d.Be64(),
}, nil
}
|
[
"func",
"NewTOCFromByteSlice",
"(",
"bs",
"ByteSlice",
")",
"(",
"*",
"TOC",
",",
"error",
")",
"{",
"if",
"bs",
".",
"Len",
"(",
")",
"<",
"indexTOCLen",
"{",
"return",
"nil",
",",
"encoding",
".",
"ErrInvalidSize",
"\n",
"}",
"\n",
"b",
":=",
"bs",
".",
"Range",
"(",
"bs",
".",
"Len",
"(",
")",
"-",
"indexTOCLen",
",",
"bs",
".",
"Len",
"(",
")",
")",
"\n\n",
"expCRC",
":=",
"binary",
".",
"BigEndian",
".",
"Uint32",
"(",
"b",
"[",
"len",
"(",
"b",
")",
"-",
"4",
":",
"]",
")",
"\n",
"d",
":=",
"encoding",
".",
"Decbuf",
"{",
"B",
":",
"b",
"[",
":",
"len",
"(",
"b",
")",
"-",
"4",
"]",
"}",
"\n\n",
"if",
"d",
".",
"Crc32",
"(",
"castagnoliTable",
")",
"!=",
"expCRC",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"encoding",
".",
"ErrInvalidChecksum",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"d",
".",
"Err",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"&",
"TOC",
"{",
"Symbols",
":",
"d",
".",
"Be64",
"(",
")",
",",
"Series",
":",
"d",
".",
"Be64",
"(",
")",
",",
"LabelIndices",
":",
"d",
".",
"Be64",
"(",
")",
",",
"LabelIndicesTable",
":",
"d",
".",
"Be64",
"(",
")",
",",
"Postings",
":",
"d",
".",
"Be64",
"(",
")",
",",
"PostingsTable",
":",
"d",
".",
"Be64",
"(",
")",
",",
"}",
",",
"nil",
"\n",
"}"
] |
// NewTOCFromByteSlice returns the parsed TOC from the given index byte slice.
|
[
"NewTOCFromByteSlice",
"return",
"parsed",
"TOC",
"from",
"given",
"index",
"byte",
"slice",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L150-L175
|
train
|
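A hedged sketch of decoding a block index TOC from a file read into memory. The byteSlice type is an illustrative implementation of the ByteSlice interface (its exact method set, Len and Range, is assumed from the usage above), and the block path is made up.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/prometheus/tsdb/index"
)

// byteSlice is a minimal in-memory implementation of the ByteSlice
// interface that NewTOCFromByteSlice consumes (method set assumed).
type byteSlice []byte

func (b byteSlice) Len() int                    { return len(b) }
func (b byteSlice) Range(start, end int) []byte { return b[start:end] }

func main() {
	// Load a block's index file and decode the CRC-protected TOC at its end.
	data, err := ioutil.ReadFile("data/01BKGV7JBM69T2G1BGBGM6KB12/index")
	if err != nil {
		fmt.Println("read index:", err)
		return
	}
	toc, err := index.NewTOCFromByteSlice(byteSlice(data))
	if err != nil {
		fmt.Println("decode TOC:", err)
		return
	}
	fmt.Printf("symbols at %d, series at %d\n", toc.Symbols, toc.Series)
}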
prometheus/tsdb
|
index/index.go
|
NewWriter
|
func NewWriter(fn string) (*Writer, error) {
dir := filepath.Dir(fn)
df, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
defer df.Close() // Close for platform windows.
if err := os.RemoveAll(fn); err != nil {
return nil, errors.Wrap(err, "remove any existing index at path")
}
f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return nil, err
}
if err := df.Sync(); err != nil {
return nil, errors.Wrap(err, "sync dir")
}
iw := &Writer{
f: f,
fbuf: bufio.NewWriterSize(f, 1<<22),
pos: 0,
stage: idxStageNone,
// Reusable memory.
buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
uint32s: make([]uint32, 0, 1<<15),
// Caches.
symbols: make(map[string]uint32, 1<<13),
seriesOffsets: make(map[uint64]uint64, 1<<16),
crc32: newCRC32(),
}
if err := iw.writeMeta(); err != nil {
return nil, err
}
return iw, nil
}
|
go
|
func NewWriter(fn string) (*Writer, error) {
dir := filepath.Dir(fn)
df, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
defer df.Close() // Close for platform windows.
if err := os.RemoveAll(fn); err != nil {
return nil, errors.Wrap(err, "remove any existing index at path")
}
f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return nil, err
}
if err := df.Sync(); err != nil {
return nil, errors.Wrap(err, "sync dir")
}
iw := &Writer{
f: f,
fbuf: bufio.NewWriterSize(f, 1<<22),
pos: 0,
stage: idxStageNone,
// Reusable memory.
buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
uint32s: make([]uint32, 0, 1<<15),
// Caches.
symbols: make(map[string]uint32, 1<<13),
seriesOffsets: make(map[uint64]uint64, 1<<16),
crc32: newCRC32(),
}
if err := iw.writeMeta(); err != nil {
return nil, err
}
return iw, nil
}
|
[
"func",
"NewWriter",
"(",
"fn",
"string",
")",
"(",
"*",
"Writer",
",",
"error",
")",
"{",
"dir",
":=",
"filepath",
".",
"Dir",
"(",
"fn",
")",
"\n\n",
"df",
",",
"err",
":=",
"fileutil",
".",
"OpenDir",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"defer",
"df",
".",
"Close",
"(",
")",
"// Close for platform windows.",
"\n\n",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"fn",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"f",
",",
"err",
":=",
"os",
".",
"OpenFile",
"(",
"fn",
",",
"os",
".",
"O_CREATE",
"|",
"os",
".",
"O_WRONLY",
",",
"0666",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"df",
".",
"Sync",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"iw",
":=",
"&",
"Writer",
"{",
"f",
":",
"f",
",",
"fbuf",
":",
"bufio",
".",
"NewWriterSize",
"(",
"f",
",",
"1",
"<<",
"22",
")",
",",
"pos",
":",
"0",
",",
"stage",
":",
"idxStageNone",
",",
"// Reusable memory.",
"buf1",
":",
"encoding",
".",
"Encbuf",
"{",
"B",
":",
"make",
"(",
"[",
"]",
"byte",
",",
"0",
",",
"1",
"<<",
"22",
")",
"}",
",",
"buf2",
":",
"encoding",
".",
"Encbuf",
"{",
"B",
":",
"make",
"(",
"[",
"]",
"byte",
",",
"0",
",",
"1",
"<<",
"22",
")",
"}",
",",
"uint32s",
":",
"make",
"(",
"[",
"]",
"uint32",
",",
"0",
",",
"1",
"<<",
"15",
")",
",",
"// Caches.",
"symbols",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"uint32",
",",
"1",
"<<",
"13",
")",
",",
"seriesOffsets",
":",
"make",
"(",
"map",
"[",
"uint64",
"]",
"uint64",
",",
"1",
"<<",
"16",
")",
",",
"crc32",
":",
"newCRC32",
"(",
")",
",",
"}",
"\n",
"if",
"err",
":=",
"iw",
".",
"writeMeta",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"iw",
",",
"nil",
"\n",
"}"
] |
// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
|
[
"NewWriter",
"returns",
"a",
"new",
"Writer",
"to",
"the",
"given",
"filename",
".",
"It",
"serializes",
"data",
"in",
"format",
"version",
"2",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L178-L219
|
train
|
prometheus/tsdb
|
index/index.go
|
addPadding
|
func (w *Writer) addPadding(size int) error {
p := w.pos % uint64(size)
if p == 0 {
return nil
}
p = uint64(size) - p
return errors.Wrap(w.write(make([]byte, p)), "add padding")
}
|
go
|
func (w *Writer) addPadding(size int) error {
p := w.pos % uint64(size)
if p == 0 {
return nil
}
p = uint64(size) - p
return errors.Wrap(w.write(make([]byte, p)), "add padding")
}
|
[
"func",
"(",
"w",
"*",
"Writer",
")",
"addPadding",
"(",
"size",
"int",
")",
"error",
"{",
"p",
":=",
"w",
".",
"pos",
"%",
"uint64",
"(",
"size",
")",
"\n",
"if",
"p",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"p",
"=",
"uint64",
"(",
"size",
")",
"-",
"p",
"\n",
"return",
"errors",
".",
"Wrap",
"(",
"w",
".",
"write",
"(",
"make",
"(",
"[",
"]",
"byte",
",",
"p",
")",
")",
",",
"\"",
"\"",
")",
"\n",
"}"
] |
// addPadding adds zero byte padding until the file size is a multiple of size.
|
[
"addPadding",
"adds",
"zero",
"byte",
"padding",
"until",
"the",
"file",
"size",
"is",
"a",
"multiple",
"size",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L240-L247
|
train
|
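A minimal standalone sketch of the alignment arithmetic behind addPadding above; padLen is a hypothetical helper (not part of the package) that only mirrors the modulo logic:

package main

import "fmt"

// padLen mirrors addPadding's arithmetic: given the current write position
// and an alignment size, it returns how many zero bytes must be written so
// the position becomes a multiple of size.
func padLen(pos uint64, size int) uint64 {
	p := pos % uint64(size)
	if p == 0 {
		return 0
	}
	return uint64(size) - p
}

func main() {
	fmt.Println(padLen(13, 16)) // 3 padding bytes bring position 13 up to 16
	fmt.Println(padLen(32, 16)) // already aligned, no padding needed
}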
prometheus/tsdb
|
index/index.go
|
ensureStage
|
func (w *Writer) ensureStage(s indexWriterStage) error {
if w.stage == s {
return nil
}
if w.stage > s {
return errors.Errorf("invalid stage %q, currently at %q", s, w.stage)
}
// Mark start of sections in table of contents.
switch s {
case idxStageSymbols:
w.toc.Symbols = w.pos
case idxStageSeries:
w.toc.Series = w.pos
case idxStageLabelIndex:
w.toc.LabelIndices = w.pos
case idxStagePostings:
w.toc.Postings = w.pos
case idxStageDone:
w.toc.LabelIndicesTable = w.pos
if err := w.writeOffsetTable(w.labelIndexes); err != nil {
return err
}
w.toc.PostingsTable = w.pos
if err := w.writeOffsetTable(w.postings); err != nil {
return err
}
if err := w.writeTOC(); err != nil {
return err
}
}
w.stage = s
return nil
}
|
go
|
func (w *Writer) ensureStage(s indexWriterStage) error {
if w.stage == s {
return nil
}
if w.stage > s {
return errors.Errorf("invalid stage %q, currently at %q", s, w.stage)
}
// Mark start of sections in table of contents.
switch s {
case idxStageSymbols:
w.toc.Symbols = w.pos
case idxStageSeries:
w.toc.Series = w.pos
case idxStageLabelIndex:
w.toc.LabelIndices = w.pos
case idxStagePostings:
w.toc.Postings = w.pos
case idxStageDone:
w.toc.LabelIndicesTable = w.pos
if err := w.writeOffsetTable(w.labelIndexes); err != nil {
return err
}
w.toc.PostingsTable = w.pos
if err := w.writeOffsetTable(w.postings); err != nil {
return err
}
if err := w.writeTOC(); err != nil {
return err
}
}
w.stage = s
return nil
}
|
[
"func",
"(",
"w",
"*",
"Writer",
")",
"ensureStage",
"(",
"s",
"indexWriterStage",
")",
"error",
"{",
"if",
"w",
".",
"stage",
"==",
"s",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"w",
".",
"stage",
">",
"s",
"{",
"return",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"s",
",",
"w",
".",
"stage",
")",
"\n",
"}",
"\n\n",
"// Mark start of sections in table of contents.",
"switch",
"s",
"{",
"case",
"idxStageSymbols",
":",
"w",
".",
"toc",
".",
"Symbols",
"=",
"w",
".",
"pos",
"\n",
"case",
"idxStageSeries",
":",
"w",
".",
"toc",
".",
"Series",
"=",
"w",
".",
"pos",
"\n\n",
"case",
"idxStageLabelIndex",
":",
"w",
".",
"toc",
".",
"LabelIndices",
"=",
"w",
".",
"pos",
"\n\n",
"case",
"idxStagePostings",
":",
"w",
".",
"toc",
".",
"Postings",
"=",
"w",
".",
"pos",
"\n\n",
"case",
"idxStageDone",
":",
"w",
".",
"toc",
".",
"LabelIndicesTable",
"=",
"w",
".",
"pos",
"\n",
"if",
"err",
":=",
"w",
".",
"writeOffsetTable",
"(",
"w",
".",
"labelIndexes",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"w",
".",
"toc",
".",
"PostingsTable",
"=",
"w",
".",
"pos",
"\n",
"if",
"err",
":=",
"w",
".",
"writeOffsetTable",
"(",
"w",
".",
"postings",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"w",
".",
"writeTOC",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"w",
".",
"stage",
"=",
"s",
"\n",
"return",
"nil",
"\n",
"}"
] |
// ensureStage handles transitions between write stages and ensures that IndexWriter
// methods are called in an order valid for the implementation.
|
[
"ensureStage",
"handles",
"transitions",
"between",
"write",
"stages",
"and",
"ensures",
"that",
"IndexWriter",
"methods",
"are",
"called",
"in",
"an",
"order",
"valid",
"for",
"the",
"implementation",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L251-L288
|
train
|
prometheus/tsdb
|
index/index.go
|
writeOffsetTable
|
func (w *Writer) writeOffsetTable(entries []hashEntry) error {
w.buf2.Reset()
w.buf2.PutBE32int(len(entries))
for _, e := range entries {
w.buf2.PutUvarint(len(e.keys))
for _, k := range e.keys {
w.buf2.PutUvarintStr(k)
}
w.buf2.PutUvarint64(e.offset)
}
w.buf1.Reset()
w.buf1.PutBE32int(w.buf2.Len())
w.buf2.PutHash(w.crc32)
return w.write(w.buf1.Get(), w.buf2.Get())
}
|
go
|
func (w *Writer) writeOffsetTable(entries []hashEntry) error {
w.buf2.Reset()
w.buf2.PutBE32int(len(entries))
for _, e := range entries {
w.buf2.PutUvarint(len(e.keys))
for _, k := range e.keys {
w.buf2.PutUvarintStr(k)
}
w.buf2.PutUvarint64(e.offset)
}
w.buf1.Reset()
w.buf1.PutBE32int(w.buf2.Len())
w.buf2.PutHash(w.crc32)
return w.write(w.buf1.Get(), w.buf2.Get())
}
|
[
"func",
"(",
"w",
"*",
"Writer",
")",
"writeOffsetTable",
"(",
"entries",
"[",
"]",
"hashEntry",
")",
"error",
"{",
"w",
".",
"buf2",
".",
"Reset",
"(",
")",
"\n",
"w",
".",
"buf2",
".",
"PutBE32int",
"(",
"len",
"(",
"entries",
")",
")",
"\n\n",
"for",
"_",
",",
"e",
":=",
"range",
"entries",
"{",
"w",
".",
"buf2",
".",
"PutUvarint",
"(",
"len",
"(",
"e",
".",
"keys",
")",
")",
"\n",
"for",
"_",
",",
"k",
":=",
"range",
"e",
".",
"keys",
"{",
"w",
".",
"buf2",
".",
"PutUvarintStr",
"(",
"k",
")",
"\n",
"}",
"\n",
"w",
".",
"buf2",
".",
"PutUvarint64",
"(",
"e",
".",
"offset",
")",
"\n",
"}",
"\n\n",
"w",
".",
"buf1",
".",
"Reset",
"(",
")",
"\n",
"w",
".",
"buf1",
".",
"PutBE32int",
"(",
"w",
".",
"buf2",
".",
"Len",
"(",
")",
")",
"\n",
"w",
".",
"buf2",
".",
"PutHash",
"(",
"w",
".",
"crc32",
")",
"\n\n",
"return",
"w",
".",
"write",
"(",
"w",
".",
"buf1",
".",
"Get",
"(",
")",
",",
"w",
".",
"buf2",
".",
"Get",
"(",
")",
")",
"\n",
"}"
] |
// writeOffsetTable writes a sequence of readable hash entries.
|
[
"writeOffsetTable",
"writes",
"a",
"sequence",
"of",
"readable",
"hash",
"entries",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L451-L468
|
train
|
prometheus/tsdb
|
index/index.go
|
NewReader
|
func NewReader(b ByteSlice) (*Reader, error) {
return newReader(b, ioutil.NopCloser(nil))
}
|
go
|
func NewReader(b ByteSlice) (*Reader, error) {
return newReader(b, ioutil.NopCloser(nil))
}
|
[
"func",
"NewReader",
"(",
"b",
"ByteSlice",
")",
"(",
"*",
"Reader",
",",
"error",
")",
"{",
"return",
"newReader",
"(",
"b",
",",
"ioutil",
".",
"NopCloser",
"(",
"nil",
")",
")",
"\n",
"}"
] |
// NewReader returns a new index reader on the given byte slice. It automatically
// handles different format versions.
|
[
"NewReader",
"returns",
"a",
"new",
"index",
"reader",
"on",
"the",
"given",
"byte",
"slice",
".",
"It",
"automatically",
"handles",
"different",
"format",
"versions",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L618-L620
|
train
|
prometheus/tsdb
|
index/index.go
|
NewFileReader
|
func NewFileReader(path string) (*Reader, error) {
f, err := fileutil.OpenMmapFile(path)
if err != nil {
return nil, err
}
r, err := newReader(realByteSlice(f.Bytes()), f)
if err != nil {
var merr tsdb_errors.MultiError
merr.Add(err)
merr.Add(f.Close())
return nil, merr
}
return r, nil
}
|
go
|
func NewFileReader(path string) (*Reader, error) {
f, err := fileutil.OpenMmapFile(path)
if err != nil {
return nil, err
}
r, err := newReader(realByteSlice(f.Bytes()), f)
if err != nil {
var merr tsdb_errors.MultiError
merr.Add(err)
merr.Add(f.Close())
return nil, merr
}
return r, nil
}
|
[
"func",
"NewFileReader",
"(",
"path",
"string",
")",
"(",
"*",
"Reader",
",",
"error",
")",
"{",
"f",
",",
"err",
":=",
"fileutil",
".",
"OpenMmapFile",
"(",
"path",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"r",
",",
"err",
":=",
"newReader",
"(",
"realByteSlice",
"(",
"f",
".",
"Bytes",
"(",
")",
")",
",",
"f",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"var",
"merr",
"tsdb_errors",
".",
"MultiError",
"\n",
"merr",
".",
"Add",
"(",
"err",
")",
"\n",
"merr",
".",
"Add",
"(",
"f",
".",
"Close",
"(",
")",
")",
"\n",
"return",
"nil",
",",
"merr",
"\n",
"}",
"\n\n",
"return",
"r",
",",
"nil",
"\n",
"}"
] |
// NewFileReader returns a new index reader against the given index file.
|
[
"NewFileReader",
"returns",
"a",
"new",
"index",
"reader",
"against",
"the",
"given",
"index",
"file",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L623-L637
|
train
|
prometheus/tsdb
|
index/index.go
|
PostingsRanges
|
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) {
m := map[labels.Label]Range{}
for k, e := range r.postings {
for v, start := range e {
d := encoding.NewDecbufAt(r.b, int(start), castagnoliTable)
if d.Err() != nil {
return nil, d.Err()
}
m[labels.Label{Name: k, Value: v}] = Range{
Start: int64(start) + 4,
End: int64(start) + 4 + int64(d.Len()),
}
}
}
return m, nil
}
|
go
|
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) {
m := map[labels.Label]Range{}
for k, e := range r.postings {
for v, start := range e {
d := encoding.NewDecbufAt(r.b, int(start), castagnoliTable)
if d.Err() != nil {
return nil, d.Err()
}
m[labels.Label{Name: k, Value: v}] = Range{
Start: int64(start) + 4,
End: int64(start) + 4 + int64(d.Len()),
}
}
}
return m, nil
}
|
[
"func",
"(",
"r",
"*",
"Reader",
")",
"PostingsRanges",
"(",
")",
"(",
"map",
"[",
"labels",
".",
"Label",
"]",
"Range",
",",
"error",
")",
"{",
"m",
":=",
"map",
"[",
"labels",
".",
"Label",
"]",
"Range",
"{",
"}",
"\n\n",
"for",
"k",
",",
"e",
":=",
"range",
"r",
".",
"postings",
"{",
"for",
"v",
",",
"start",
":=",
"range",
"e",
"{",
"d",
":=",
"encoding",
".",
"NewDecbufAt",
"(",
"r",
".",
"b",
",",
"int",
"(",
"start",
")",
",",
"castagnoliTable",
")",
"\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"nil",
",",
"d",
".",
"Err",
"(",
")",
"\n",
"}",
"\n",
"m",
"[",
"labels",
".",
"Label",
"{",
"Name",
":",
"k",
",",
"Value",
":",
"v",
"}",
"]",
"=",
"Range",
"{",
"Start",
":",
"int64",
"(",
"start",
")",
"+",
"4",
",",
"End",
":",
"int64",
"(",
"start",
")",
"+",
"4",
"+",
"int64",
"(",
"d",
".",
"Len",
"(",
")",
")",
",",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"m",
",",
"nil",
"\n",
"}"
] |
// PostingsRanges returns a new map of byte ranges in the underlying index file
// for all postings lists.
|
[
"PostingsRanges",
"returns",
"a",
"new",
"map",
"of",
"byte",
"range",
"in",
"the",
"underlying",
"index",
"file",
"for",
"all",
"postings",
"lists",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L725-L741
|
train
|
prometheus/tsdb
|
index/index.go
|
ReadSymbols
|
func ReadSymbols(bs ByteSlice, version int, off int) ([]string, map[uint32]string, error) {
if off == 0 {
return nil, nil, nil
}
d := encoding.NewDecbufAt(bs, off, castagnoliTable)
var (
origLen = d.Len()
cnt = d.Be32int()
basePos = uint32(off) + 4
nextPos = basePos + uint32(origLen-d.Len())
symbolSlice []string
symbols = map[uint32]string{}
)
if version == FormatV2 {
symbolSlice = make([]string, 0, cnt)
}
for d.Err() == nil && d.Len() > 0 && cnt > 0 {
s := d.UvarintStr()
if version == FormatV2 {
symbolSlice = append(symbolSlice, s)
} else {
symbols[nextPos] = s
nextPos = basePos + uint32(origLen-d.Len())
}
cnt--
}
return symbolSlice, symbols, errors.Wrap(d.Err(), "read symbols")
}
|
go
|
func ReadSymbols(bs ByteSlice, version int, off int) ([]string, map[uint32]string, error) {
if off == 0 {
return nil, nil, nil
}
d := encoding.NewDecbufAt(bs, off, castagnoliTable)
var (
origLen = d.Len()
cnt = d.Be32int()
basePos = uint32(off) + 4
nextPos = basePos + uint32(origLen-d.Len())
symbolSlice []string
symbols = map[uint32]string{}
)
if version == FormatV2 {
symbolSlice = make([]string, 0, cnt)
}
for d.Err() == nil && d.Len() > 0 && cnt > 0 {
s := d.UvarintStr()
if version == FormatV2 {
symbolSlice = append(symbolSlice, s)
} else {
symbols[nextPos] = s
nextPos = basePos + uint32(origLen-d.Len())
}
cnt--
}
return symbolSlice, symbols, errors.Wrap(d.Err(), "read symbols")
}
|
[
"func",
"ReadSymbols",
"(",
"bs",
"ByteSlice",
",",
"version",
"int",
",",
"off",
"int",
")",
"(",
"[",
"]",
"string",
",",
"map",
"[",
"uint32",
"]",
"string",
",",
"error",
")",
"{",
"if",
"off",
"==",
"0",
"{",
"return",
"nil",
",",
"nil",
",",
"nil",
"\n",
"}",
"\n",
"d",
":=",
"encoding",
".",
"NewDecbufAt",
"(",
"bs",
",",
"off",
",",
"castagnoliTable",
")",
"\n\n",
"var",
"(",
"origLen",
"=",
"d",
".",
"Len",
"(",
")",
"\n",
"cnt",
"=",
"d",
".",
"Be32int",
"(",
")",
"\n",
"basePos",
"=",
"uint32",
"(",
"off",
")",
"+",
"4",
"\n",
"nextPos",
"=",
"basePos",
"+",
"uint32",
"(",
"origLen",
"-",
"d",
".",
"Len",
"(",
")",
")",
"\n",
"symbolSlice",
"[",
"]",
"string",
"\n",
"symbols",
"=",
"map",
"[",
"uint32",
"]",
"string",
"{",
"}",
"\n",
")",
"\n",
"if",
"version",
"==",
"FormatV2",
"{",
"symbolSlice",
"=",
"make",
"(",
"[",
"]",
"string",
",",
"0",
",",
"cnt",
")",
"\n",
"}",
"\n\n",
"for",
"d",
".",
"Err",
"(",
")",
"==",
"nil",
"&&",
"d",
".",
"Len",
"(",
")",
">",
"0",
"&&",
"cnt",
">",
"0",
"{",
"s",
":=",
"d",
".",
"UvarintStr",
"(",
")",
"\n\n",
"if",
"version",
"==",
"FormatV2",
"{",
"symbolSlice",
"=",
"append",
"(",
"symbolSlice",
",",
"s",
")",
"\n",
"}",
"else",
"{",
"symbols",
"[",
"nextPos",
"]",
"=",
"s",
"\n",
"nextPos",
"=",
"basePos",
"+",
"uint32",
"(",
"origLen",
"-",
"d",
".",
"Len",
"(",
")",
")",
"\n",
"}",
"\n",
"cnt",
"--",
"\n",
"}",
"\n",
"return",
"symbolSlice",
",",
"symbols",
",",
"errors",
".",
"Wrap",
"(",
"d",
".",
"Err",
"(",
")",
",",
"\"",
"\"",
")",
"\n",
"}"
] |
// ReadSymbols reads the symbol table fully into memory and allocates proper strings for them.
// Strings backed by the mmap'd memory would cause memory faults if applications keep using them
// after the reader is closed.
|
[
"ReadSymbols",
"reads",
"the",
"symbol",
"table",
"fully",
"into",
"memory",
"and",
"allocates",
"proper",
"strings",
"for",
"them",
".",
"Strings",
"backed",
"by",
"the",
"mmap",
"d",
"memory",
"would",
"cause",
"memory",
"faults",
"if",
"applications",
"keep",
"using",
"them",
"after",
"the",
"reader",
"is",
"closed",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L746-L776
|
train
|
prometheus/tsdb
|
index/index.go
|
ReadOffsetTable
|
func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64) error) error {
d := encoding.NewDecbufAt(bs, int(off), castagnoliTable)
cnt := d.Be32()
for d.Err() == nil && d.Len() > 0 && cnt > 0 {
keyCount := d.Uvarint()
keys := make([]string, 0, keyCount)
for i := 0; i < keyCount; i++ {
keys = append(keys, d.UvarintStr())
}
o := d.Uvarint64()
if d.Err() != nil {
break
}
if err := f(keys, o); err != nil {
return err
}
cnt--
}
return d.Err()
}
|
go
|
func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64) error) error {
d := encoding.NewDecbufAt(bs, int(off), castagnoliTable)
cnt := d.Be32()
for d.Err() == nil && d.Len() > 0 && cnt > 0 {
keyCount := d.Uvarint()
keys := make([]string, 0, keyCount)
for i := 0; i < keyCount; i++ {
keys = append(keys, d.UvarintStr())
}
o := d.Uvarint64()
if d.Err() != nil {
break
}
if err := f(keys, o); err != nil {
return err
}
cnt--
}
return d.Err()
}
|
[
"func",
"ReadOffsetTable",
"(",
"bs",
"ByteSlice",
",",
"off",
"uint64",
",",
"f",
"func",
"(",
"[",
"]",
"string",
",",
"uint64",
")",
"error",
")",
"error",
"{",
"d",
":=",
"encoding",
".",
"NewDecbufAt",
"(",
"bs",
",",
"int",
"(",
"off",
")",
",",
"castagnoliTable",
")",
"\n",
"cnt",
":=",
"d",
".",
"Be32",
"(",
")",
"\n\n",
"for",
"d",
".",
"Err",
"(",
")",
"==",
"nil",
"&&",
"d",
".",
"Len",
"(",
")",
">",
"0",
"&&",
"cnt",
">",
"0",
"{",
"keyCount",
":=",
"d",
".",
"Uvarint",
"(",
")",
"\n",
"keys",
":=",
"make",
"(",
"[",
"]",
"string",
",",
"0",
",",
"keyCount",
")",
"\n\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"keyCount",
";",
"i",
"++",
"{",
"keys",
"=",
"append",
"(",
"keys",
",",
"d",
".",
"UvarintStr",
"(",
")",
")",
"\n",
"}",
"\n",
"o",
":=",
"d",
".",
"Uvarint64",
"(",
")",
"\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"if",
"err",
":=",
"f",
"(",
"keys",
",",
"o",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"cnt",
"--",
"\n",
"}",
"\n",
"return",
"d",
".",
"Err",
"(",
")",
"\n",
"}"
] |
// ReadOffsetTable reads an offset table at the given position and calls f for each
// entry it finds. If f returns an error, decoding stops and that error is returned.
|
[
"ReadOffsetTable",
"reads",
"an",
"offset",
"table",
"and",
"at",
"the",
"given",
"position",
"calls",
"f",
"for",
"each",
"found",
"entry",
".",
"If",
"f",
"returns",
"an",
"error",
"it",
"stops",
"decoding",
"and",
"returns",
"the",
"received",
"error",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L780-L801
|
train
|
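A hedged usage sketch for ReadOffsetTable above: collectOffsets is a hypothetical caller, the import path is assumed, and how bs and off are obtained is left to the caller; it simply gathers every entry into a map via the callback, stopping at the first callback error.

package offsets

import (
	"strings"

	"github.com/prometheus/tsdb/index" // assumed import path for this package
)

// collectOffsets walks the offset table starting at off inside bs and
// records each entry's offset, keyed by its key strings joined with ",".
func collectOffsets(bs index.ByteSlice, off uint64) (map[string]uint64, error) {
	out := map[string]uint64{}
	err := index.ReadOffsetTable(bs, off, func(keys []string, o uint64) error {
		out[strings.Join(keys, ",")] = o
		return nil
	})
	return out, err
}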
prometheus/tsdb
|
index/index.go
|
Symbols
|
func (r *Reader) Symbols() (map[string]struct{}, error) {
res := make(map[string]struct{}, len(r.symbolsV1)+len(r.symbolsV2))
for _, s := range r.symbolsV1 {
res[s] = struct{}{}
}
for _, s := range r.symbolsV2 {
res[s] = struct{}{}
}
return res, nil
}
|
go
|
func (r *Reader) Symbols() (map[string]struct{}, error) {
res := make(map[string]struct{}, len(r.symbolsV1)+len(r.symbolsV2))
for _, s := range r.symbolsV1 {
res[s] = struct{}{}
}
for _, s := range r.symbolsV2 {
res[s] = struct{}{}
}
return res, nil
}
|
[
"func",
"(",
"r",
"*",
"Reader",
")",
"Symbols",
"(",
")",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
",",
"error",
")",
"{",
"res",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
",",
"len",
"(",
"r",
".",
"symbolsV1",
")",
"+",
"len",
"(",
"r",
".",
"symbolsV2",
")",
")",
"\n\n",
"for",
"_",
",",
"s",
":=",
"range",
"r",
".",
"symbolsV1",
"{",
"res",
"[",
"s",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"for",
"_",
",",
"s",
":=",
"range",
"r",
".",
"symbolsV2",
"{",
"res",
"[",
"s",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"return",
"res",
",",
"nil",
"\n",
"}"
] |
// Symbols returns a set of symbols that exist within the index.
|
[
"Symbols",
"returns",
"a",
"set",
"of",
"symbols",
"that",
"exist",
"within",
"the",
"index",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L820-L830
|
train
|
prometheus/tsdb
|
index/index.go
|
LabelValues
|
func (r *Reader) LabelValues(names ...string) (StringTuples, error) {
key := strings.Join(names, labelNameSeperator)
off, ok := r.labels[key]
if !ok {
// XXX(fabxc): hot fix. Should return a partial data error and handle cases
// where the entire block has no data gracefully.
return emptyStringTuples{}, nil
//return nil, fmt.Errorf("label index doesn't exist")
}
d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
nc := d.Be32int()
d.Be32() // consume unused value entry count.
if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "read label value index")
}
st := &serializedStringTuples{
idsCount: nc,
idsBytes: d.Get(),
lookup: r.lookupSymbol,
}
return st, nil
}
|
go
|
func (r *Reader) LabelValues(names ...string) (StringTuples, error) {
key := strings.Join(names, labelNameSeperator)
off, ok := r.labels[key]
if !ok {
// XXX(fabxc): hot fix. Should return a partial data error and handle cases
// where the entire block has no data gracefully.
return emptyStringTuples{}, nil
//return nil, fmt.Errorf("label index doesn't exist")
}
d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
nc := d.Be32int()
d.Be32() // consume unused value entry count.
if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "read label value index")
}
st := &serializedStringTuples{
idsCount: nc,
idsBytes: d.Get(),
lookup: r.lookupSymbol,
}
return st, nil
}
|
[
"func",
"(",
"r",
"*",
"Reader",
")",
"LabelValues",
"(",
"names",
"...",
"string",
")",
"(",
"StringTuples",
",",
"error",
")",
"{",
"key",
":=",
"strings",
".",
"Join",
"(",
"names",
",",
"labelNameSeperator",
")",
"\n",
"off",
",",
"ok",
":=",
"r",
".",
"labels",
"[",
"key",
"]",
"\n",
"if",
"!",
"ok",
"{",
"// XXX(fabxc): hot fix. Should return a partial data error and handle cases",
"// where the entire block has no data gracefully.",
"return",
"emptyStringTuples",
"{",
"}",
",",
"nil",
"\n",
"//return nil, fmt.Errorf(\"label index doesn't exist\")",
"}",
"\n\n",
"d",
":=",
"encoding",
".",
"NewDecbufAt",
"(",
"r",
".",
"b",
",",
"int",
"(",
"off",
")",
",",
"castagnoliTable",
")",
"\n\n",
"nc",
":=",
"d",
".",
"Be32int",
"(",
")",
"\n",
"d",
".",
"Be32",
"(",
")",
"// consume unused value entry count.",
"\n\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"d",
".",
"Err",
"(",
")",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"st",
":=",
"&",
"serializedStringTuples",
"{",
"idsCount",
":",
"nc",
",",
"idsBytes",
":",
"d",
".",
"Get",
"(",
")",
",",
"lookup",
":",
"r",
".",
"lookupSymbol",
",",
"}",
"\n",
"return",
"st",
",",
"nil",
"\n",
"}"
] |
// LabelValues returns value tuples that exist for the given label name tuples.
|
[
"LabelValues",
"returns",
"value",
"tuples",
"that",
"exist",
"for",
"the",
"given",
"label",
"name",
"tuples",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L838-L863
|
train
|
prometheus/tsdb
|
index/index.go
|
Series
|
func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
offset := id
// In version 2 series IDs are no longer exact references but series are 16-byte padded
// and the ID is the multiple of 16 of the actual position.
if r.version == FormatV2 {
offset = id * 16
}
d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
if d.Err() != nil {
return d.Err()
}
return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series")
}
|
go
|
func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
offset := id
// In version 2 series IDs are no longer exact references but series are 16-byte padded
// and the ID is the multiple of 16 of the actual position.
if r.version == FormatV2 {
offset = id * 16
}
d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
if d.Err() != nil {
return d.Err()
}
return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series")
}
|
[
"func",
"(",
"r",
"*",
"Reader",
")",
"Series",
"(",
"id",
"uint64",
",",
"lbls",
"*",
"labels",
".",
"Labels",
",",
"chks",
"*",
"[",
"]",
"chunks",
".",
"Meta",
")",
"error",
"{",
"offset",
":=",
"id",
"\n",
"// In version 2 series IDs are no longer exact references but series are 16-byte padded",
"// and the ID is the multiple of 16 of the actual position.",
"if",
"r",
".",
"version",
"==",
"FormatV2",
"{",
"offset",
"=",
"id",
"*",
"16",
"\n",
"}",
"\n",
"d",
":=",
"encoding",
".",
"NewDecbufUvarintAt",
"(",
"r",
".",
"b",
",",
"int",
"(",
"offset",
")",
",",
"castagnoliTable",
")",
"\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"d",
".",
"Err",
"(",
")",
"\n",
"}",
"\n",
"return",
"errors",
".",
"Wrap",
"(",
"r",
".",
"dec",
".",
"Series",
"(",
"d",
".",
"Get",
"(",
")",
",",
"lbls",
",",
"chks",
")",
",",
"\"",
"\"",
")",
"\n",
"}"
] |
// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
|
[
"Series",
"reads",
"the",
"series",
"with",
"the",
"given",
"ID",
"and",
"writes",
"its",
"labels",
"and",
"chunks",
"into",
"lbls",
"and",
"chks",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L881-L893
|
train
|
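A small standalone sketch of the ID-to-offset translation described in Series above; seriesOffset is a hypothetical helper that restates the v1/v2 rule from the code comment:

package main

import "fmt"

// seriesOffset restates the rule from Reader.Series: in index format v2,
// series entries are 16-byte aligned and the stored ID is the byte offset
// divided by 16, so the reader multiplies it back; v1 IDs are exact offsets.
func seriesOffset(id uint64, formatV2 bool) uint64 {
	if formatV2 {
		return id * 16
	}
	return id
}

func main() {
	fmt.Println(seriesOffset(5, true))   // v2: ID 5 -> byte offset 80
	fmt.Println(seriesOffset(80, false)) // v1: ID is already the offset
}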
prometheus/tsdb
|
index/index.go
|
Postings
|
func (r *Reader) Postings(name, value string) (Postings, error) {
e, ok := r.postings[name]
if !ok {
return EmptyPostings(), nil
}
off, ok := e[value]
if !ok {
return EmptyPostings(), nil
}
d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "get postings entry")
}
_, p, err := r.dec.Postings(d.Get())
if err != nil {
return nil, errors.Wrap(err, "decode postings")
}
return p, nil
}
|
go
|
func (r *Reader) Postings(name, value string) (Postings, error) {
e, ok := r.postings[name]
if !ok {
return EmptyPostings(), nil
}
off, ok := e[value]
if !ok {
return EmptyPostings(), nil
}
d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "get postings entry")
}
_, p, err := r.dec.Postings(d.Get())
if err != nil {
return nil, errors.Wrap(err, "decode postings")
}
return p, nil
}
|
[
"func",
"(",
"r",
"*",
"Reader",
")",
"Postings",
"(",
"name",
",",
"value",
"string",
")",
"(",
"Postings",
",",
"error",
")",
"{",
"e",
",",
"ok",
":=",
"r",
".",
"postings",
"[",
"name",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"EmptyPostings",
"(",
")",
",",
"nil",
"\n",
"}",
"\n",
"off",
",",
"ok",
":=",
"e",
"[",
"value",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"EmptyPostings",
"(",
")",
",",
"nil",
"\n",
"}",
"\n",
"d",
":=",
"encoding",
".",
"NewDecbufAt",
"(",
"r",
".",
"b",
",",
"int",
"(",
"off",
")",
",",
"castagnoliTable",
")",
"\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"d",
".",
"Err",
"(",
")",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"_",
",",
"p",
",",
"err",
":=",
"r",
".",
"dec",
".",
"Postings",
"(",
"d",
".",
"Get",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"p",
",",
"nil",
"\n",
"}"
] |
// Postings returns a postings list for the given label pair.
|
[
"Postings",
"returns",
"a",
"postings",
"list",
"for",
"the",
"given",
"label",
"pair",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L896-L914
|
train
|
prometheus/tsdb
|
index/index.go
|
LabelNames
|
func (r *Reader) LabelNames() ([]string, error) {
labelNamesMap := make(map[string]struct{}, len(r.labels))
for key := range r.labels {
// 'key' contains the label names concatenated with the
// delimiter 'labelNameSeperator'.
names := strings.Split(key, labelNameSeperator)
for _, name := range names {
if name == allPostingsKey.Name {
// This is not from any metric.
// It is basically an empty label name.
continue
}
labelNamesMap[name] = struct{}{}
}
}
labelNames := make([]string, 0, len(labelNamesMap))
for name := range labelNamesMap {
labelNames = append(labelNames, name)
}
sort.Strings(labelNames)
return labelNames, nil
}
|
go
|
func (r *Reader) LabelNames() ([]string, error) {
labelNamesMap := make(map[string]struct{}, len(r.labels))
for key := range r.labels {
// 'key' contains the label names concatenated with the
// delimiter 'labelNameSeperator'.
names := strings.Split(key, labelNameSeperator)
for _, name := range names {
if name == allPostingsKey.Name {
// This is not from any metric.
// It is basically an empty label name.
continue
}
labelNamesMap[name] = struct{}{}
}
}
labelNames := make([]string, 0, len(labelNamesMap))
for name := range labelNamesMap {
labelNames = append(labelNames, name)
}
sort.Strings(labelNames)
return labelNames, nil
}
|
[
"func",
"(",
"r",
"*",
"Reader",
")",
"LabelNames",
"(",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"labelNamesMap",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
",",
"len",
"(",
"r",
".",
"labels",
")",
")",
"\n",
"for",
"key",
":=",
"range",
"r",
".",
"labels",
"{",
"// 'key' contains the label names concatenated with the",
"// delimiter 'labelNameSeperator'.",
"names",
":=",
"strings",
".",
"Split",
"(",
"key",
",",
"labelNameSeperator",
")",
"\n",
"for",
"_",
",",
"name",
":=",
"range",
"names",
"{",
"if",
"name",
"==",
"allPostingsKey",
".",
"Name",
"{",
"// This is not from any metric.",
"// It is basically an empty label name.",
"continue",
"\n",
"}",
"\n",
"labelNamesMap",
"[",
"name",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"labelNames",
":=",
"make",
"(",
"[",
"]",
"string",
",",
"0",
",",
"len",
"(",
"labelNamesMap",
")",
")",
"\n",
"for",
"name",
":=",
"range",
"labelNamesMap",
"{",
"labelNames",
"=",
"append",
"(",
"labelNames",
",",
"name",
")",
"\n",
"}",
"\n",
"sort",
".",
"Strings",
"(",
"labelNames",
")",
"\n",
"return",
"labelNames",
",",
"nil",
"\n",
"}"
] |
// LabelNames returns all the unique label names present in the index.
|
[
"LabelNames",
"returns",
"all",
"the",
"unique",
"label",
"names",
"present",
"in",
"the",
"index",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L928-L949
|
train
|
prometheus/tsdb
|
index/index.go
|
Postings
|
func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
d := encoding.Decbuf{B: b}
n := d.Be32int()
l := d.Get()
return n, newBigEndianPostings(l), d.Err()
}
|
go
|
func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
d := encoding.Decbuf{B: b}
n := d.Be32int()
l := d.Get()
return n, newBigEndianPostings(l), d.Err()
}
|
[
"func",
"(",
"dec",
"*",
"Decoder",
")",
"Postings",
"(",
"b",
"[",
"]",
"byte",
")",
"(",
"int",
",",
"Postings",
",",
"error",
")",
"{",
"d",
":=",
"encoding",
".",
"Decbuf",
"{",
"B",
":",
"b",
"}",
"\n",
"n",
":=",
"d",
".",
"Be32int",
"(",
")",
"\n",
"l",
":=",
"d",
".",
"Get",
"(",
")",
"\n",
"return",
"n",
",",
"newBigEndianPostings",
"(",
"l",
")",
",",
"d",
".",
"Err",
"(",
")",
"\n",
"}"
] |
// Postings returns a postings list for b and its number of elements.
|
[
"Postings",
"returns",
"a",
"postings",
"list",
"for",
"b",
"and",
"its",
"number",
"of",
"elements",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L1028-L1033
|
train
|
prometheus/tsdb
|
index/index.go
|
Series
|
func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
*lbls = (*lbls)[:0]
*chks = (*chks)[:0]
d := encoding.Decbuf{B: b}
k := d.Uvarint()
for i := 0; i < k; i++ {
lno := uint32(d.Uvarint())
lvo := uint32(d.Uvarint())
if d.Err() != nil {
return errors.Wrap(d.Err(), "read series label offsets")
}
ln, err := dec.LookupSymbol(lno)
if err != nil {
return errors.Wrap(err, "lookup label name")
}
lv, err := dec.LookupSymbol(lvo)
if err != nil {
return errors.Wrap(err, "lookup label value")
}
*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
}
// Read the chunks meta data.
k = d.Uvarint()
if k == 0 {
return nil
}
t0 := d.Varint64()
maxt := int64(d.Uvarint64()) + t0
ref0 := int64(d.Uvarint64())
*chks = append(*chks, chunks.Meta{
Ref: uint64(ref0),
MinTime: t0,
MaxTime: maxt,
})
t0 = maxt
for i := 1; i < k; i++ {
mint := int64(d.Uvarint64()) + t0
maxt := int64(d.Uvarint64()) + mint
ref0 += d.Varint64()
t0 = maxt
if d.Err() != nil {
return errors.Wrapf(d.Err(), "read meta for chunk %d", i)
}
*chks = append(*chks, chunks.Meta{
Ref: uint64(ref0),
MinTime: mint,
MaxTime: maxt,
})
}
return d.Err()
}
|
go
|
func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
*lbls = (*lbls)[:0]
*chks = (*chks)[:0]
d := encoding.Decbuf{B: b}
k := d.Uvarint()
for i := 0; i < k; i++ {
lno := uint32(d.Uvarint())
lvo := uint32(d.Uvarint())
if d.Err() != nil {
return errors.Wrap(d.Err(), "read series label offsets")
}
ln, err := dec.LookupSymbol(lno)
if err != nil {
return errors.Wrap(err, "lookup label name")
}
lv, err := dec.LookupSymbol(lvo)
if err != nil {
return errors.Wrap(err, "lookup label value")
}
*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
}
// Read the chunks meta data.
k = d.Uvarint()
if k == 0 {
return nil
}
t0 := d.Varint64()
maxt := int64(d.Uvarint64()) + t0
ref0 := int64(d.Uvarint64())
*chks = append(*chks, chunks.Meta{
Ref: uint64(ref0),
MinTime: t0,
MaxTime: maxt,
})
t0 = maxt
for i := 1; i < k; i++ {
mint := int64(d.Uvarint64()) + t0
maxt := int64(d.Uvarint64()) + mint
ref0 += d.Varint64()
t0 = maxt
if d.Err() != nil {
return errors.Wrapf(d.Err(), "read meta for chunk %d", i)
}
*chks = append(*chks, chunks.Meta{
Ref: uint64(ref0),
MinTime: mint,
MaxTime: maxt,
})
}
return d.Err()
}
|
[
"func",
"(",
"dec",
"*",
"Decoder",
")",
"Series",
"(",
"b",
"[",
"]",
"byte",
",",
"lbls",
"*",
"labels",
".",
"Labels",
",",
"chks",
"*",
"[",
"]",
"chunks",
".",
"Meta",
")",
"error",
"{",
"*",
"lbls",
"=",
"(",
"*",
"lbls",
")",
"[",
":",
"0",
"]",
"\n",
"*",
"chks",
"=",
"(",
"*",
"chks",
")",
"[",
":",
"0",
"]",
"\n\n",
"d",
":=",
"encoding",
".",
"Decbuf",
"{",
"B",
":",
"b",
"}",
"\n\n",
"k",
":=",
"d",
".",
"Uvarint",
"(",
")",
"\n\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"k",
";",
"i",
"++",
"{",
"lno",
":=",
"uint32",
"(",
"d",
".",
"Uvarint",
"(",
")",
")",
"\n",
"lvo",
":=",
"uint32",
"(",
"d",
".",
"Uvarint",
"(",
")",
")",
"\n\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"d",
".",
"Err",
"(",
")",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"ln",
",",
"err",
":=",
"dec",
".",
"LookupSymbol",
"(",
"lno",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"lv",
",",
"err",
":=",
"dec",
".",
"LookupSymbol",
"(",
"lvo",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"*",
"lbls",
"=",
"append",
"(",
"*",
"lbls",
",",
"labels",
".",
"Label",
"{",
"Name",
":",
"ln",
",",
"Value",
":",
"lv",
"}",
")",
"\n",
"}",
"\n\n",
"// Read the chunks meta data.",
"k",
"=",
"d",
".",
"Uvarint",
"(",
")",
"\n\n",
"if",
"k",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"t0",
":=",
"d",
".",
"Varint64",
"(",
")",
"\n",
"maxt",
":=",
"int64",
"(",
"d",
".",
"Uvarint64",
"(",
")",
")",
"+",
"t0",
"\n",
"ref0",
":=",
"int64",
"(",
"d",
".",
"Uvarint64",
"(",
")",
")",
"\n\n",
"*",
"chks",
"=",
"append",
"(",
"*",
"chks",
",",
"chunks",
".",
"Meta",
"{",
"Ref",
":",
"uint64",
"(",
"ref0",
")",
",",
"MinTime",
":",
"t0",
",",
"MaxTime",
":",
"maxt",
",",
"}",
")",
"\n",
"t0",
"=",
"maxt",
"\n\n",
"for",
"i",
":=",
"1",
";",
"i",
"<",
"k",
";",
"i",
"++",
"{",
"mint",
":=",
"int64",
"(",
"d",
".",
"Uvarint64",
"(",
")",
")",
"+",
"t0",
"\n",
"maxt",
":=",
"int64",
"(",
"d",
".",
"Uvarint64",
"(",
")",
")",
"+",
"mint",
"\n\n",
"ref0",
"+=",
"d",
".",
"Varint64",
"(",
")",
"\n",
"t0",
"=",
"maxt",
"\n\n",
"if",
"d",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"errors",
".",
"Wrapf",
"(",
"d",
".",
"Err",
"(",
")",
",",
"\"",
"\"",
",",
"i",
")",
"\n",
"}",
"\n\n",
"*",
"chks",
"=",
"append",
"(",
"*",
"chks",
",",
"chunks",
".",
"Meta",
"{",
"Ref",
":",
"uint64",
"(",
"ref0",
")",
",",
"MinTime",
":",
"mint",
",",
"MaxTime",
":",
"maxt",
",",
"}",
")",
"\n",
"}",
"\n",
"return",
"d",
".",
"Err",
"(",
")",
"\n",
"}"
] |
// Series decodes a series entry from the given byte slice into lbls and chks.
|
[
"Series",
"decodes",
"a",
"series",
"entry",
"from",
"the",
"given",
"byte",
"slice",
"into",
"lset",
"and",
"chks",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L1036-L1100
|
train
|
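A standalone sketch of the chunk-meta delta scheme that Decoder.Series above undoes; chunkMeta and metaDelta are hypothetical stand-ins for the real types, and the numbers are made up purely to show the reconstruction:

package main

import "fmt"

// chunkMeta is a stand-in for chunks.Meta in this sketch.
type chunkMeta struct {
	ref        uint64
	mint, maxt int64
}

// metaDelta holds the values written for every chunk after the first:
// mint relative to the previous maxt, maxt relative to its own mint,
// and a signed delta applied to the previous chunk reference.
type metaDelta struct {
	dMint, dMaxt uint64
	dRef         int64
}

// decodeMetas reconstructs absolute chunk metadata the same way
// Decoder.Series does while iterating the encoded entry.
func decodeMetas(first chunkMeta, deltas []metaDelta) []chunkMeta {
	out := []chunkMeta{first}
	t0, ref := first.maxt, int64(first.ref)
	for _, d := range deltas {
		mint := int64(d.dMint) + t0
		maxt := int64(d.dMaxt) + mint
		ref += d.dRef
		t0 = maxt
		out = append(out, chunkMeta{ref: uint64(ref), mint: mint, maxt: maxt})
	}
	return out
}

func main() {
	metas := decodeMetas(
		chunkMeta{ref: 100, mint: 1000, maxt: 2000},
		[]metaDelta{{dMint: 1, dMaxt: 999, dRef: 40}},
	)
	fmt.Println(metas) // [{100 1000 2000} {140 2001 3000}]
}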
prometheus/tsdb
|
labels/selector.go
|
Matches
|
func (s Selector) Matches(labels Labels) bool {
for _, m := range s {
if v := labels.Get(m.Name()); !m.Matches(v) {
return false
}
}
return true
}
|
go
|
func (s Selector) Matches(labels Labels) bool {
for _, m := range s {
if v := labels.Get(m.Name()); !m.Matches(v) {
return false
}
}
return true
}
|
[
"func",
"(",
"s",
"Selector",
")",
"Matches",
"(",
"labels",
"Labels",
")",
"bool",
"{",
"for",
"_",
",",
"m",
":=",
"range",
"s",
"{",
"if",
"v",
":=",
"labels",
".",
"Get",
"(",
"m",
".",
"Name",
"(",
")",
")",
";",
"!",
"m",
".",
"Matches",
"(",
"v",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}"
] |
// Matches returns whether the labels satisfy all matchers.
|
[
"Matches",
"returns",
"whether",
"the",
"labels",
"satisfy",
"all",
"matchers",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L25-L32
|
train
|
prometheus/tsdb
|
labels/selector.go
|
NewEqualMatcher
|
func NewEqualMatcher(name, value string) Matcher {
return &EqualMatcher{name: name, value: value}
}
|
go
|
func NewEqualMatcher(name, value string) Matcher {
return &EqualMatcher{name: name, value: value}
}
|
[
"func",
"NewEqualMatcher",
"(",
"name",
",",
"value",
"string",
")",
"Matcher",
"{",
"return",
"&",
"EqualMatcher",
"{",
"name",
":",
"name",
",",
"value",
":",
"value",
"}",
"\n",
"}"
] |
// NewEqualMatcher returns a new matcher matching an exact label value.
|
[
"NewEqualMatcher",
"returns",
"a",
"new",
"matcher",
"matching",
"an",
"exact",
"label",
"value",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L62-L64
|
train
|
prometheus/tsdb
|
labels/selector.go
|
NewRegexpMatcher
|
func NewRegexpMatcher(name, pattern string) (Matcher, error) {
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
return &regexpMatcher{name: name, re: re}, nil
}
|
go
|
func NewRegexpMatcher(name, pattern string) (Matcher, error) {
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
return &regexpMatcher{name: name, re: re}, nil
}
|
[
"func",
"NewRegexpMatcher",
"(",
"name",
",",
"pattern",
"string",
")",
"(",
"Matcher",
",",
"error",
")",
"{",
"re",
",",
"err",
":=",
"regexp",
".",
"Compile",
"(",
"pattern",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"&",
"regexpMatcher",
"{",
"name",
":",
"name",
",",
"re",
":",
"re",
"}",
",",
"nil",
"\n",
"}"
] |
// NewRegexpMatcher returns a new matcher verifying that a value matches
// the regular expression pattern.
|
[
"NewRegexpMatcher",
"returns",
"a",
"new",
"matcher",
"verifying",
"that",
"a",
"value",
"matches",
"the",
"regular",
"expression",
"pattern",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L77-L83
|
train
|
prometheus/tsdb
|
labels/selector.go
|
NewMustRegexpMatcher
|
func NewMustRegexpMatcher(name, pattern string) Matcher {
re, err := regexp.Compile(pattern)
if err != nil {
panic(err)
}
return &regexpMatcher{name: name, re: re}
}
|
go
|
func NewMustRegexpMatcher(name, pattern string) Matcher {
re, err := regexp.Compile(pattern)
if err != nil {
panic(err)
}
return &regexpMatcher{name: name, re: re}
}
|
[
"func",
"NewMustRegexpMatcher",
"(",
"name",
",",
"pattern",
"string",
")",
"Matcher",
"{",
"re",
",",
"err",
":=",
"regexp",
".",
"Compile",
"(",
"pattern",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"&",
"regexpMatcher",
"{",
"name",
":",
"name",
",",
"re",
":",
"re",
"}",
"\n\n",
"}"
] |
// NewMustRegexpMatcher returns a new matcher verifying that a value matches
// the regular expression pattern. Will panic if the pattern is not a valid
// regular expression.
|
[
"NewMustRegexpMatcher",
"returns",
"a",
"new",
"matcher",
"verifying",
"that",
"a",
"value",
"matches",
"the",
"regular",
"expression",
"pattern",
".",
"Will",
"panic",
"if",
"the",
"pattern",
"is",
"not",
"a",
"valid",
"regular",
"expression",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L88-L95
|
train
|
prometheus/tsdb
|
chunks/chunks.go
|
writeHash
|
func (cm *Meta) writeHash(h hash.Hash) error {
if _, err := h.Write([]byte{byte(cm.Chunk.Encoding())}); err != nil {
return err
}
if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
return err
}
return nil
}
|
go
|
func (cm *Meta) writeHash(h hash.Hash) error {
if _, err := h.Write([]byte{byte(cm.Chunk.Encoding())}); err != nil {
return err
}
if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
return err
}
return nil
}
|
[
"func",
"(",
"cm",
"*",
"Meta",
")",
"writeHash",
"(",
"h",
"hash",
".",
"Hash",
")",
"error",
"{",
"if",
"_",
",",
"err",
":=",
"h",
".",
"Write",
"(",
"[",
"]",
"byte",
"{",
"byte",
"(",
"cm",
".",
"Chunk",
".",
"Encoding",
"(",
")",
")",
"}",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"_",
",",
"err",
":=",
"h",
".",
"Write",
"(",
"cm",
".",
"Chunk",
".",
"Bytes",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] |
// writeHash writes the chunk encoding and raw data into the provided hash.
|
[
"writeHash",
"writes",
"the",
"chunk",
"encoding",
"and",
"raw",
"data",
"into",
"the",
"provided",
"hash",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L58-L66
|
train
|
prometheus/tsdb
|
chunks/chunks.go
|
NewWriter
|
func NewWriter(dir string) (*Writer, error) {
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
dirFile, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
cw := &Writer{
dirFile: dirFile,
n: 0,
crc32: newCRC32(),
segmentSize: defaultChunkSegmentSize,
}
return cw, nil
}
|
go
|
func NewWriter(dir string) (*Writer, error) {
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
dirFile, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
cw := &Writer{
dirFile: dirFile,
n: 0,
crc32: newCRC32(),
segmentSize: defaultChunkSegmentSize,
}
return cw, nil
}
|
[
"func",
"NewWriter",
"(",
"dir",
"string",
")",
"(",
"*",
"Writer",
",",
"error",
")",
"{",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"dir",
",",
"0777",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"dirFile",
",",
"err",
":=",
"fileutil",
".",
"OpenDir",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"cw",
":=",
"&",
"Writer",
"{",
"dirFile",
":",
"dirFile",
",",
"n",
":",
"0",
",",
"crc32",
":",
"newCRC32",
"(",
")",
",",
"segmentSize",
":",
"defaultChunkSegmentSize",
",",
"}",
"\n",
"return",
"cw",
",",
"nil",
"\n",
"}"
] |
// NewWriter returns a new writer against the given directory.
|
[
"NewWriter",
"returns",
"a",
"new",
"writer",
"against",
"the",
"given",
"directory",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L107-L122
|
train
|
prometheus/tsdb
|
chunks/chunks.go
|
finalizeTail
|
func (w *Writer) finalizeTail() error {
tf := w.tail()
if tf == nil {
return nil
}
if err := w.wbuf.Flush(); err != nil {
return err
}
if err := tf.Sync(); err != nil {
return err
}
// As the file was pre-allocated, we truncate any superfluous zero bytes.
off, err := tf.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err := tf.Truncate(off); err != nil {
return err
}
return tf.Close()
}
|
go
|
func (w *Writer) finalizeTail() error {
tf := w.tail()
if tf == nil {
return nil
}
if err := w.wbuf.Flush(); err != nil {
return err
}
if err := tf.Sync(); err != nil {
return err
}
// As the file was pre-allocated, we truncate any superfluous zero bytes.
off, err := tf.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err := tf.Truncate(off); err != nil {
return err
}
return tf.Close()
}
|
[
"func",
"(",
"w",
"*",
"Writer",
")",
"finalizeTail",
"(",
")",
"error",
"{",
"tf",
":=",
"w",
".",
"tail",
"(",
")",
"\n",
"if",
"tf",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"w",
".",
"wbuf",
".",
"Flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"tf",
".",
"Sync",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// As the file was pre-allocated, we truncate any superfluous zero bytes.",
"off",
",",
"err",
":=",
"tf",
".",
"Seek",
"(",
"0",
",",
"io",
".",
"SeekCurrent",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"tf",
".",
"Truncate",
"(",
"off",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"tf",
".",
"Close",
"(",
")",
"\n",
"}"
] |
// finalizeTail writes all pending data to the current tail file,
// truncates its size, and closes it.
|
[
"finalizeTail",
"writes",
"all",
"pending",
"data",
"to",
"the",
"current",
"tail",
"file",
"truncates",
"its",
"size",
"and",
"closes",
"it",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L133-L155
|
train
|
prometheus/tsdb
|
chunks/chunks.go
|
MergeChunks
|
func MergeChunks(a, b chunkenc.Chunk) (*chunkenc.XORChunk, error) {
newChunk := chunkenc.NewXORChunk()
app, err := newChunk.Appender()
if err != nil {
return nil, err
}
ait := a.Iterator()
bit := b.Iterator()
aok, bok := ait.Next(), bit.Next()
for aok && bok {
at, av := ait.At()
bt, bv := bit.At()
if at < bt {
app.Append(at, av)
aok = ait.Next()
} else if bt < at {
app.Append(bt, bv)
bok = bit.Next()
} else {
app.Append(bt, bv)
aok = ait.Next()
bok = bit.Next()
}
}
for aok {
at, av := ait.At()
app.Append(at, av)
aok = ait.Next()
}
for bok {
bt, bv := bit.At()
app.Append(bt, bv)
bok = bit.Next()
}
if ait.Err() != nil {
return nil, ait.Err()
}
if bit.Err() != nil {
return nil, bit.Err()
}
return newChunk, nil
}
|
go
|
func MergeChunks(a, b chunkenc.Chunk) (*chunkenc.XORChunk, error) {
newChunk := chunkenc.NewXORChunk()
app, err := newChunk.Appender()
if err != nil {
return nil, err
}
ait := a.Iterator()
bit := b.Iterator()
aok, bok := ait.Next(), bit.Next()
for aok && bok {
at, av := ait.At()
bt, bv := bit.At()
if at < bt {
app.Append(at, av)
aok = ait.Next()
} else if bt < at {
app.Append(bt, bv)
bok = bit.Next()
} else {
app.Append(bt, bv)
aok = ait.Next()
bok = bit.Next()
}
}
for aok {
at, av := ait.At()
app.Append(at, av)
aok = ait.Next()
}
for bok {
bt, bv := bit.At()
app.Append(bt, bv)
bok = bit.Next()
}
if ait.Err() != nil {
return nil, ait.Err()
}
if bit.Err() != nil {
return nil, bit.Err()
}
return newChunk, nil
}
|
[
"func",
"MergeChunks",
"(",
"a",
",",
"b",
"chunkenc",
".",
"Chunk",
")",
"(",
"*",
"chunkenc",
".",
"XORChunk",
",",
"error",
")",
"{",
"newChunk",
":=",
"chunkenc",
".",
"NewXORChunk",
"(",
")",
"\n",
"app",
",",
"err",
":=",
"newChunk",
".",
"Appender",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"ait",
":=",
"a",
".",
"Iterator",
"(",
")",
"\n",
"bit",
":=",
"b",
".",
"Iterator",
"(",
")",
"\n",
"aok",
",",
"bok",
":=",
"ait",
".",
"Next",
"(",
")",
",",
"bit",
".",
"Next",
"(",
")",
"\n",
"for",
"aok",
"&&",
"bok",
"{",
"at",
",",
"av",
":=",
"ait",
".",
"At",
"(",
")",
"\n",
"bt",
",",
"bv",
":=",
"bit",
".",
"At",
"(",
")",
"\n",
"if",
"at",
"<",
"bt",
"{",
"app",
".",
"Append",
"(",
"at",
",",
"av",
")",
"\n",
"aok",
"=",
"ait",
".",
"Next",
"(",
")",
"\n",
"}",
"else",
"if",
"bt",
"<",
"at",
"{",
"app",
".",
"Append",
"(",
"bt",
",",
"bv",
")",
"\n",
"bok",
"=",
"bit",
".",
"Next",
"(",
")",
"\n",
"}",
"else",
"{",
"app",
".",
"Append",
"(",
"bt",
",",
"bv",
")",
"\n",
"aok",
"=",
"ait",
".",
"Next",
"(",
")",
"\n",
"bok",
"=",
"bit",
".",
"Next",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"aok",
"{",
"at",
",",
"av",
":=",
"ait",
".",
"At",
"(",
")",
"\n",
"app",
".",
"Append",
"(",
"at",
",",
"av",
")",
"\n",
"aok",
"=",
"ait",
".",
"Next",
"(",
")",
"\n",
"}",
"\n",
"for",
"bok",
"{",
"bt",
",",
"bv",
":=",
"bit",
".",
"At",
"(",
")",
"\n",
"app",
".",
"Append",
"(",
"bt",
",",
"bv",
")",
"\n",
"bok",
"=",
"bit",
".",
"Next",
"(",
")",
"\n",
"}",
"\n",
"if",
"ait",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"nil",
",",
"ait",
".",
"Err",
"(",
")",
"\n",
"}",
"\n",
"if",
"bit",
".",
"Err",
"(",
")",
"!=",
"nil",
"{",
"return",
"nil",
",",
"bit",
".",
"Err",
"(",
")",
"\n",
"}",
"\n",
"return",
"newChunk",
",",
"nil",
"\n",
"}"
] |
// MergeChunks vertically merges a and b, i.e., if there is any sample
// with the same timestamp in both a and b, the sample in a is discarded.
|
[
"MergeChunks",
"vertically",
"merges",
"a",
"and",
"b",
"i",
".",
"e",
".",
"if",
"there",
"is",
"any",
"sample",
"with",
"same",
"timestamp",
"in",
"both",
"a",
"and",
"b",
"the",
"sample",
"in",
"a",
"is",
"discarded",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L240-L281
|
train
|
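A hedged usage sketch for MergeChunks above: the import paths are assumptions and buildChunk is a hypothetical helper; two overlapping XOR chunks are merged and, on the shared timestamp, the sample from b wins as the docstring states.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/chunkenc" // assumed import paths
	"github.com/prometheus/tsdb/chunks"
)

// buildChunk packs (timestamp, value) pairs into a fresh XOR chunk so the
// merge below has real inputs to work on.
func buildChunk(samples [][2]int64) chunkenc.Chunk {
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	for _, s := range samples {
		app.Append(s[0], float64(s[1]))
	}
	return c
}

func main() {
	a := buildChunk([][2]int64{{1, 1}, {3, 3}})
	b := buildChunk([][2]int64{{1, 9}, {2, 2}})

	merged, err := chunks.MergeChunks(a, b)
	if err != nil {
		panic(err)
	}
	// Timestamps 1, 2, 3 survive; at t=1 the value 9 from b is kept.
	fmt.Println(merged.NumSamples()) // 3
}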
prometheus/tsdb
|
chunks/chunks.go
|
NewDirReader
|
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
files, err := sequenceFiles(dir)
if err != nil {
return nil, err
}
if pool == nil {
pool = chunkenc.NewPool()
}
var (
bs []ByteSlice
cs []io.Closer
merr tsdb_errors.MultiError
)
for _, fn := range files {
f, err := fileutil.OpenMmapFile(fn)
if err != nil {
merr.Add(errors.Wrap(err, "mmap files"))
merr.Add(closeAll(cs))
return nil, merr
}
cs = append(cs, f)
bs = append(bs, realByteSlice(f.Bytes()))
}
reader, err := newReader(bs, cs, pool)
if err != nil {
merr.Add(err)
merr.Add(closeAll(cs))
return nil, merr
}
return reader, nil
}
|
go
|
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
files, err := sequenceFiles(dir)
if err != nil {
return nil, err
}
if pool == nil {
pool = chunkenc.NewPool()
}
var (
bs []ByteSlice
cs []io.Closer
merr tsdb_errors.MultiError
)
for _, fn := range files {
f, err := fileutil.OpenMmapFile(fn)
if err != nil {
merr.Add(errors.Wrap(err, "mmap files"))
merr.Add(closeAll(cs))
return nil, merr
}
cs = append(cs, f)
bs = append(bs, realByteSlice(f.Bytes()))
}
reader, err := newReader(bs, cs, pool)
if err != nil {
merr.Add(err)
merr.Add(closeAll(cs))
return nil, merr
}
return reader, nil
}
|
[
"func",
"NewDirReader",
"(",
"dir",
"string",
",",
"pool",
"chunkenc",
".",
"Pool",
")",
"(",
"*",
"Reader",
",",
"error",
")",
"{",
"files",
",",
"err",
":=",
"sequenceFiles",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"pool",
"==",
"nil",
"{",
"pool",
"=",
"chunkenc",
".",
"NewPool",
"(",
")",
"\n",
"}",
"\n\n",
"var",
"(",
"bs",
"[",
"]",
"ByteSlice",
"\n",
"cs",
"[",
"]",
"io",
".",
"Closer",
"\n",
"merr",
"tsdb_errors",
".",
"MultiError",
"\n",
")",
"\n",
"for",
"_",
",",
"fn",
":=",
"range",
"files",
"{",
"f",
",",
"err",
":=",
"fileutil",
".",
"OpenMmapFile",
"(",
"fn",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"merr",
".",
"Add",
"(",
"errors",
".",
"Wrap",
"(",
"err",
",",
"\"",
"\"",
")",
")",
"\n",
"merr",
".",
"Add",
"(",
"closeAll",
"(",
"cs",
")",
")",
"\n",
"return",
"nil",
",",
"merr",
"\n",
"}",
"\n",
"cs",
"=",
"append",
"(",
"cs",
",",
"f",
")",
"\n",
"bs",
"=",
"append",
"(",
"bs",
",",
"realByteSlice",
"(",
"f",
".",
"Bytes",
"(",
")",
")",
")",
"\n",
"}",
"\n\n",
"reader",
",",
"err",
":=",
"newReader",
"(",
"bs",
",",
"cs",
",",
"pool",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"merr",
".",
"Add",
"(",
"err",
")",
"\n",
"merr",
".",
"Add",
"(",
"closeAll",
"(",
"cs",
")",
")",
"\n",
"return",
"nil",
",",
"merr",
"\n",
"}",
"\n",
"return",
"reader",
",",
"nil",
"\n",
"}"
] |
// NewDirReader returns a new Reader against sequentially numbered files in the
// given directory.
|
[
"NewDirReader",
"returns",
"a",
"new",
"Reader",
"against",
"sequentially",
"numbered",
"files",
"in",
"the",
"given",
"directory",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L401-L433
|
train
|
prometheus/tsdb
|
chunks/chunks.go
|
Chunk
|
func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
var (
sgmSeq = int(ref >> 32)
sgmOffset = int((ref << 32) >> 32)
)
if sgmSeq >= len(s.bs) {
return nil, errors.Errorf("reference sequence %d out of range", sgmSeq)
}
chkS := s.bs[sgmSeq]
if sgmOffset >= chkS.Len() {
return nil, errors.Errorf("offset %d beyond data size %d", sgmOffset, chkS.Len())
}
// With the minimum chunk length this should never cause us reading
// over the end of the slice.
chk := chkS.Range(sgmOffset, sgmOffset+binary.MaxVarintLen32)
chkLen, n := binary.Uvarint(chk)
if n <= 0 {
return nil, errors.Errorf("reading chunk length failed with %d", n)
}
chk = chkS.Range(sgmOffset+n, sgmOffset+n+1+int(chkLen))
return s.pool.Get(chunkenc.Encoding(chk[0]), chk[1:1+chkLen])
}
|
go
|
func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
var (
sgmSeq = int(ref >> 32)
sgmOffset = int((ref << 32) >> 32)
)
if sgmSeq >= len(s.bs) {
return nil, errors.Errorf("reference sequence %d out of range", sgmSeq)
}
chkS := s.bs[sgmSeq]
if sgmOffset >= chkS.Len() {
return nil, errors.Errorf("offset %d beyond data size %d", sgmOffset, chkS.Len())
}
// With the minimum chunk length this should never cause us reading
// over the end of the slice.
chk := chkS.Range(sgmOffset, sgmOffset+binary.MaxVarintLen32)
chkLen, n := binary.Uvarint(chk)
if n <= 0 {
return nil, errors.Errorf("reading chunk length failed with %d", n)
}
chk = chkS.Range(sgmOffset+n, sgmOffset+n+1+int(chkLen))
return s.pool.Get(chunkenc.Encoding(chk[0]), chk[1:1+chkLen])
}
|
[
"func",
"(",
"s",
"*",
"Reader",
")",
"Chunk",
"(",
"ref",
"uint64",
")",
"(",
"chunkenc",
".",
"Chunk",
",",
"error",
")",
"{",
"var",
"(",
"sgmSeq",
"=",
"int",
"(",
"ref",
">>",
"32",
")",
"\n",
"sgmOffset",
"=",
"int",
"(",
"(",
"ref",
"<<",
"32",
")",
">>",
"32",
")",
"\n",
")",
"\n",
"if",
"sgmSeq",
">=",
"len",
"(",
"s",
".",
"bs",
")",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"sgmSeq",
")",
"\n",
"}",
"\n",
"chkS",
":=",
"s",
".",
"bs",
"[",
"sgmSeq",
"]",
"\n\n",
"if",
"sgmOffset",
">=",
"chkS",
".",
"Len",
"(",
")",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"sgmOffset",
",",
"chkS",
".",
"Len",
"(",
")",
")",
"\n",
"}",
"\n",
"// With the minimum chunk length this should never cause us reading",
"// over the end of the slice.",
"chk",
":=",
"chkS",
".",
"Range",
"(",
"sgmOffset",
",",
"sgmOffset",
"+",
"binary",
".",
"MaxVarintLen32",
")",
"\n\n",
"chkLen",
",",
"n",
":=",
"binary",
".",
"Uvarint",
"(",
"chk",
")",
"\n",
"if",
"n",
"<=",
"0",
"{",
"return",
"nil",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"n",
")",
"\n",
"}",
"\n",
"chk",
"=",
"chkS",
".",
"Range",
"(",
"sgmOffset",
"+",
"n",
",",
"sgmOffset",
"+",
"n",
"+",
"1",
"+",
"int",
"(",
"chkLen",
")",
")",
"\n\n",
"return",
"s",
".",
"pool",
".",
"Get",
"(",
"chunkenc",
".",
"Encoding",
"(",
"chk",
"[",
"0",
"]",
")",
",",
"chk",
"[",
"1",
":",
"1",
"+",
"chkLen",
"]",
")",
"\n",
"}"
] |
// Chunk returns a chunk from a given reference.
|
[
"Chunk",
"returns",
"a",
"chunk",
"from",
"a",
"given",
"reference",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L445-L469
|
train
|
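A standalone sketch of the reference layout that Chunk above decodes; packRef and unpackRef are hypothetical helpers mirroring the bit arithmetic (segment sequence in the upper 32 bits, byte offset in the lower 32 bits):

package main

import "fmt"

// packRef builds a chunk reference the way the writer lays it out:
// segment file sequence in the upper 32 bits, byte offset in the lower 32.
func packRef(seq, offset uint32) uint64 {
	return uint64(seq)<<32 | uint64(offset)
}

// unpackRef mirrors the decoding in Reader.Chunk.
func unpackRef(ref uint64) (seq, offset int) {
	return int(ref >> 32), int((ref << 32) >> 32)
}

func main() {
	ref := packRef(2, 1024)
	seq, off := unpackRef(ref)
	fmt.Println(seq, off) // 2 1024
}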
prometheus/tsdb
|
chunkenc/xor.go
|
NewXORChunk
|
func NewXORChunk() *XORChunk {
b := make([]byte, 2, 128)
return &XORChunk{b: bstream{stream: b, count: 0}}
}
|
go
|
func NewXORChunk() *XORChunk {
b := make([]byte, 2, 128)
return &XORChunk{b: bstream{stream: b, count: 0}}
}
|
[
"func",
"NewXORChunk",
"(",
")",
"*",
"XORChunk",
"{",
"b",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"2",
",",
"128",
")",
"\n",
"return",
"&",
"XORChunk",
"{",
"b",
":",
"bstream",
"{",
"stream",
":",
"b",
",",
"count",
":",
"0",
"}",
"}",
"\n",
"}"
] |
// NewXORChunk returns a new chunk with XOR encoding of the given size.
|
[
"NewXORChunk",
"returns",
"a",
"new",
"chunk",
"with",
"XOR",
"encoding",
"of",
"the",
"given",
"size",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunkenc/xor.go#L58-L61
|
train
|
prometheus/tsdb
|
chunkenc/xor.go
|
NumSamples
|
func (c *XORChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
|
go
|
func (c *XORChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
|
[
"func",
"(",
"c",
"*",
"XORChunk",
")",
"NumSamples",
"(",
")",
"int",
"{",
"return",
"int",
"(",
"binary",
".",
"BigEndian",
".",
"Uint16",
"(",
"c",
".",
"Bytes",
"(",
")",
")",
")",
"\n",
"}"
] |
// NumSamples returns the number of samples in the chunk.
|
[
"NumSamples",
"returns",
"the",
"number",
"of",
"samples",
"in",
"the",
"chunk",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunkenc/xor.go#L74-L76
|
train
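The two header bytes reserved by NewXORChunk in the previous record are the big-endian sample counter that NumSamples decodes, so a freshly created chunk reports zero samples. A small sketch, assuming this repository's chunkenc import path:

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/chunkenc"
)

func main() {
	c := chunkenc.NewXORChunk()
	// The two header bytes are still zero, so the chunk reports no samples yet.
	fmt.Println(c.NumSamples()) // 0
}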
|
prometheus/tsdb
|
chunkenc/xor.go
|
Appender
|
func (c *XORChunk) Appender() (Appender, error) {
it := c.iterator()
// To get an appender we must know the state it would have if we had
// appended all existing data from scratch.
// We iterate through the end and populate via the iterator's state.
for it.Next() {
}
if err := it.Err(); err != nil {
return nil, err
}
a := &xorAppender{
b: &c.b,
t: it.t,
v: it.val,
tDelta: it.tDelta,
leading: it.leading,
trailing: it.trailing,
}
if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
a.leading = 0xff
}
return a, nil
}
|
go
|
func (c *XORChunk) Appender() (Appender, error) {
it := c.iterator()
// To get an appender we must know the state it would have if we had
// appended all existing data from scratch.
// We iterate through the end and populate via the iterator's state.
for it.Next() {
}
if err := it.Err(); err != nil {
return nil, err
}
a := &xorAppender{
b: &c.b,
t: it.t,
v: it.val,
tDelta: it.tDelta,
leading: it.leading,
trailing: it.trailing,
}
if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
a.leading = 0xff
}
return a, nil
}
|
[
"func",
"(",
"c",
"*",
"XORChunk",
")",
"Appender",
"(",
")",
"(",
"Appender",
",",
"error",
")",
"{",
"it",
":=",
"c",
".",
"iterator",
"(",
")",
"\n\n",
"// To get an appender we must know the state it would have if we had",
"// appended all existing data from scratch.",
"// We iterate through the end and populate via the iterator's state.",
"for",
"it",
".",
"Next",
"(",
")",
"{",
"}",
"\n",
"if",
"err",
":=",
"it",
".",
"Err",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"a",
":=",
"&",
"xorAppender",
"{",
"b",
":",
"&",
"c",
".",
"b",
",",
"t",
":",
"it",
".",
"t",
",",
"v",
":",
"it",
".",
"val",
",",
"tDelta",
":",
"it",
".",
"tDelta",
",",
"leading",
":",
"it",
".",
"leading",
",",
"trailing",
":",
"it",
".",
"trailing",
",",
"}",
"\n",
"if",
"binary",
".",
"BigEndian",
".",
"Uint16",
"(",
"a",
".",
"b",
".",
"bytes",
"(",
")",
")",
"==",
"0",
"{",
"a",
".",
"leading",
"=",
"0xff",
"\n",
"}",
"\n",
"return",
"a",
",",
"nil",
"\n",
"}"
] |
// Appender implements the Chunk interface.
|
[
"Appender",
"implements",
"the",
"Chunk",
"interface",
"."
] |
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
|
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunkenc/xor.go#L79-L103
|
train
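A sketch of the append path built on the records above. It assumes the chunkenc.Appender interface exposes Append(t int64, v float64) as in the upstream package; that interface is not shown in this record.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/tsdb/chunkenc"
)

func main() {
	c := chunkenc.NewXORChunk()
	// Appender replays any existing samples to rebuild encoder state; here there are none.
	app, err := c.Appender()
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 3; i++ {
		// Assumed signature: Append(timestamp int64, value float64); timestamps here are milliseconds.
		app.Append(int64(i*1000), float64(i))
	}
	fmt.Println(c.NumSamples()) // 3
}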
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/export.go
|
DeleteRegistrationByName
|
func (mc MongoClient) DeleteRegistrationByName(name string) error {
return mc.deleteRegistration(bson.M{"name": name})
}
|
go
|
func (mc MongoClient) DeleteRegistrationByName(name string) error {
return mc.deleteRegistration(bson.M{"name": name})
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"DeleteRegistrationByName",
"(",
"name",
"string",
")",
"error",
"{",
"return",
"mc",
".",
"deleteRegistration",
"(",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"name",
"}",
")",
"\n",
"}"
] |
// Delete a registration by name
// UnexpectedError - problem getting in database
// NotFound - no registration with the ID was found
|
[
"Delete",
"a",
"registration",
"by",
"name",
"UnexpectedError",
"-",
"problem",
"getting",
"in",
"database",
"NotFound",
"-",
"no",
"registration",
"with",
"the",
"ID",
"was",
"found"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/export.go#L120-L122
|
train
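A caller-side fragment (not a complete file) showing how the NotFound case mentioned in the comment above might be distinguished. It assumes errorMap maps missing documents to a db.ErrNotFound sentinel, which is not shown in this record, and that an initialized MongoClient value mc is in scope.

// fragment inside the same mongo package, with an existing MongoClient value mc
if err := mc.DeleteRegistrationByName("sample-registration"); err != nil {
	if err == db.ErrNotFound { // assumed sentinel produced by errorMap for missing documents
		// nothing to delete; typically surfaced as a 404 at the API layer
	} else {
		// unexpected database problem
	}
}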
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/export.go
|
getRegistrations
|
func (mc MongoClient) getRegistrations(q bson.M) ([]models.Registration, error) {
s := mc.getSessionCopy()
defer s.Close()
var regs []models.Registration
	err := s.DB(mc.database.Name).C(db.ExportCollection).Find(q).All(&regs)

if err != nil {
return []models.Registration{}, errorMap(err)
}
return regs, nil
}
|
go
|
func (mc MongoClient) getRegistrations(q bson.M) ([]models.Registration, error) {
s := mc.getSessionCopy()
defer s.Close()
var regs []models.Registration
	err := s.DB(mc.database.Name).C(db.ExportCollection).Find(q).All(&regs)
if err != nil {
return []models.Registration{}, errorMap(err)
}
return regs, nil
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"getRegistrations",
"(",
"q",
"bson",
".",
"M",
")",
"(",
"[",
"]",
"models",
".",
"Registration",
",",
"error",
")",
"{",
"s",
":=",
"mc",
".",
"getSessionCopy",
"(",
")",
"\n",
"defer",
"s",
".",
"Close",
"(",
")",
"\n\n",
"var",
"regs",
"[",
"]",
"models",
".",
"Registration",
"\n",
"err",
":=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"ExportCollection",
")",
".",
"Find",
"(",
"q",
")",
".",
"All",
"(",
"&",
"regs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"[",
"]",
"models",
".",
"Registration",
"{",
"}",
",",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"regs",
",",
"nil",
"\n",
"}"
] |
// Get registrations for the passed query
|
[
"Get",
"registrations",
"for",
"the",
"passed",
"query"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/export.go#L134-L145
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/export.go
|
getRegistration
|
func (mc MongoClient) getRegistration(q bson.M) (models.Registration, error) {
s := mc.getSessionCopy()
defer s.Close()
var reg models.Registration
	err := s.DB(mc.database.Name).C(db.ExportCollection).Find(q).One(&reg)
if err != nil {
return models.Registration{}, errorMap(err)
}
return reg, nil
}
|
go
|
func (mc MongoClient) getRegistration(q bson.M) (models.Registration, error) {
s := mc.getSessionCopy()
defer s.Close()
var reg models.Registration
	err := s.DB(mc.database.Name).C(db.ExportCollection).Find(q).One(&reg)
if err != nil {
return models.Registration{}, errorMap(err)
}
return reg, nil
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"getRegistration",
"(",
"q",
"bson",
".",
"M",
")",
"(",
"models",
".",
"Registration",
",",
"error",
")",
"{",
"s",
":=",
"mc",
".",
"getSessionCopy",
"(",
")",
"\n",
"defer",
"s",
".",
"Close",
"(",
")",
"\n\n",
"var",
"reg",
"models",
".",
"Registration",
"\n",
"err",
":=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"ExportCollection",
")",
".",
"Find",
"(",
"q",
")",
".",
"One",
"(",
"&",
"reg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"models",
".",
"Registration",
"{",
"}",
",",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"reg",
",",
"nil",
"\n",
"}"
] |
// Get a single registration for the passed query
|
[
"Get",
"a",
"single",
"registration",
"for",
"the",
"passed",
"query"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/export.go#L148-L158
|
train
|
edgexfoundry/edgex-go
|
internal/core/data/init.go
|
newDBClient
|
func newDBClient(dbType string) (interfaces.DBClient, error) {
switch dbType {
case db.MongoDB:
dbConfig := db.Configuration{
Host: Configuration.Databases["Primary"].Host,
Port: Configuration.Databases["Primary"].Port,
Timeout: Configuration.Databases["Primary"].Timeout,
DatabaseName: Configuration.Databases["Primary"].Name,
Username: Configuration.Databases["Primary"].Username,
Password: Configuration.Databases["Primary"].Password,
}
return mongo.NewClient(dbConfig)
case db.RedisDB:
dbConfig := db.Configuration{
Host: Configuration.Databases["Primary"].Host,
Port: Configuration.Databases["Primary"].Port,
}
return redis.NewClient(dbConfig) //TODO: Verify this also connects to Redis
default:
return nil, db.ErrUnsupportedDatabase
}
}
|
go
|
func newDBClient(dbType string) (interfaces.DBClient, error) {
switch dbType {
case db.MongoDB:
dbConfig := db.Configuration{
Host: Configuration.Databases["Primary"].Host,
Port: Configuration.Databases["Primary"].Port,
Timeout: Configuration.Databases["Primary"].Timeout,
DatabaseName: Configuration.Databases["Primary"].Name,
Username: Configuration.Databases["Primary"].Username,
Password: Configuration.Databases["Primary"].Password,
}
return mongo.NewClient(dbConfig)
case db.RedisDB:
dbConfig := db.Configuration{
Host: Configuration.Databases["Primary"].Host,
Port: Configuration.Databases["Primary"].Port,
}
return redis.NewClient(dbConfig) //TODO: Verify this also connects to Redis
default:
return nil, db.ErrUnsupportedDatabase
}
}
|
[
"func",
"newDBClient",
"(",
"dbType",
"string",
")",
"(",
"interfaces",
".",
"DBClient",
",",
"error",
")",
"{",
"switch",
"dbType",
"{",
"case",
"db",
".",
"MongoDB",
":",
"dbConfig",
":=",
"db",
".",
"Configuration",
"{",
"Host",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Host",
",",
"Port",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Port",
",",
"Timeout",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Timeout",
",",
"DatabaseName",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Name",
",",
"Username",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Username",
",",
"Password",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Password",
",",
"}",
"\n",
"return",
"mongo",
".",
"NewClient",
"(",
"dbConfig",
")",
"\n",
"case",
"db",
".",
"RedisDB",
":",
"dbConfig",
":=",
"db",
".",
"Configuration",
"{",
"Host",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Host",
",",
"Port",
":",
"Configuration",
".",
"Databases",
"[",
"\"",
"\"",
"]",
".",
"Port",
",",
"}",
"\n",
"return",
"redis",
".",
"NewClient",
"(",
"dbConfig",
")",
"//TODO: Verify this also connects to Redis",
"\n",
"default",
":",
"return",
"nil",
",",
"db",
".",
"ErrUnsupportedDatabase",
"\n",
"}",
"\n",
"}"
] |
// Return the dbClient interface
|
[
"Return",
"the",
"dbClient",
"interface"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/core/data/init.go#L152-L173
|
train
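A fragment sketching how the data service might obtain its client once Configuration has been populated; it would sit in the same internal/core/data package and is illustrative only.

// fragment inside internal/core/data, after Configuration has been loaded
dbClient, err := newDBClient(db.MongoDB)
switch {
case err == db.ErrUnsupportedDatabase:
	// only db.MongoDB and db.RedisDB are handled by newDBClient
case err != nil:
	// connection or configuration failure
}
_ = dbClient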
|
edgexfoundry/edgex-go
|
internal/system/agent/utils.go
|
pingHandler
|
func pingHandler(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.Write([]byte("pong"))
}
|
go
|
func pingHandler(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.Write([]byte("pong"))
}
|
[
"func",
"pingHandler",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"_",
"*",
"http",
".",
"Request",
")",
"{",
"w",
".",
"Header",
"(",
")",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"w",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"\"",
"\"",
")",
")",
"\n",
"}"
] |
// Test if the service is working
|
[
"Test",
"if",
"the",
"service",
"is",
"working"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/system/agent/utils.go#L25-L28
|
train
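A minimal test sketch for the handler above, assuming it lives in the same package (here named agent) so pingHandler can be called directly; the request path is arbitrary because the handler ignores the request.

package agent

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestPingHandler(t *testing.T) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/ping", nil)

	pingHandler(rec, req)

	if got := rec.Body.String(); got != "pong" {
		t.Fatalf("expected pong, got %q", got)
	}
	if ct := rec.Header().Get("Content-Type"); ct != "text/plain" {
		t.Fatalf("unexpected content type %q", ct)
	}
}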
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
EventsWithLimit
|
func (mc MongoClient) EventsWithLimit(limit int) ([]contract.Event, error) {
return mc.mapEvents(mc.getEventsLimit(bson.M{}, limit))
}
|
go
|
func (mc MongoClient) EventsWithLimit(limit int) ([]contract.Event, error) {
return mc.mapEvents(mc.getEventsLimit(bson.M{}, limit))
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"EventsWithLimit",
"(",
"limit",
"int",
")",
"(",
"[",
"]",
"contract",
".",
"Event",
",",
"error",
")",
"{",
"return",
"mc",
".",
"mapEvents",
"(",
"mc",
".",
"getEventsLimit",
"(",
"bson",
".",
"M",
"{",
"}",
",",
"limit",
")",
")",
"\n",
"}"
] |
// Return events up to the max number specified
// UnexpectedError - failed to retrieve events from the database
// Sort the events in descending order by ID
|
[
"Return",
"events",
"up",
"to",
"the",
"max",
"number",
"specified",
"UnexpectedError",
"-",
"failed",
"to",
"retrieve",
"events",
"from",
"the",
"database",
"Sort",
"the",
"events",
"in",
"descending",
"order",
"by",
"ID"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L42-L44
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
EventCount
|
func (mc MongoClient) EventCount() (int, error) {
s := mc.getSessionCopy()
defer s.Close()
count, err := s.DB(mc.database.Name).C(db.EventsCollection).Find(nil).Count()
if err != nil {
return 0, errorMap(err)
}
return count, nil
}
|
go
|
func (mc MongoClient) EventCount() (int, error) {
s := mc.getSessionCopy()
defer s.Close()
count, err := s.DB(mc.database.Name).C(db.EventsCollection).Find(nil).Count()
if err != nil {
return 0, errorMap(err)
}
return count, nil
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"EventCount",
"(",
")",
"(",
"int",
",",
"error",
")",
"{",
"s",
":=",
"mc",
".",
"getSessionCopy",
"(",
")",
"\n",
"defer",
"s",
".",
"Close",
"(",
")",
"\n\n",
"count",
",",
"err",
":=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"EventsCollection",
")",
".",
"Find",
"(",
"nil",
")",
".",
"Count",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"count",
",",
"nil",
"\n",
"}"
] |
// Get the number of events in Mongo
|
[
"Get",
"the",
"number",
"of",
"events",
"in",
"Mongo"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L121-L130
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
EventCountByDeviceId
|
func (mc MongoClient) EventCountByDeviceId(id string) (int, error) {
s := mc.getSessionCopy()
defer s.Close()
query := bson.M{"device": id}
count, err := s.DB(mc.database.Name).C(db.EventsCollection).Find(query).Count()
if err != nil {
return 0, errorMap(err)
}
return count, nil
}
|
go
|
func (mc MongoClient) EventCountByDeviceId(id string) (int, error) {
s := mc.getSessionCopy()
defer s.Close()
query := bson.M{"device": id}
count, err := s.DB(mc.database.Name).C(db.EventsCollection).Find(query).Count()
if err != nil {
return 0, errorMap(err)
}
return count, nil
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"EventCountByDeviceId",
"(",
"id",
"string",
")",
"(",
"int",
",",
"error",
")",
"{",
"s",
":=",
"mc",
".",
"getSessionCopy",
"(",
")",
"\n",
"defer",
"s",
".",
"Close",
"(",
")",
"\n\n",
"query",
":=",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"id",
"}",
"\n",
"count",
",",
"err",
":=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"EventsCollection",
")",
".",
"Find",
"(",
"query",
")",
".",
"Count",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"count",
",",
"nil",
"\n",
"}"
] |
// Get the number of events in Mongo for the device
|
[
"Get",
"the",
"number",
"of",
"events",
"in",
"Mongo",
"for",
"the",
"device"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L133-L143
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
DeleteEventById
|
func (mc MongoClient) DeleteEventById(id string) error {
return mc.deleteById(db.EventsCollection, id)
}
|
go
|
func (mc MongoClient) DeleteEventById(id string) error {
return mc.deleteById(db.EventsCollection, id)
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"DeleteEventById",
"(",
"id",
"string",
")",
"error",
"{",
"return",
"mc",
".",
"deleteById",
"(",
"db",
".",
"EventsCollection",
",",
"id",
")",
"\n",
"}"
] |
// Delete an event by ID and all of its readings
// 404 - Event not found
// 503 - Unexpected problems
|
[
"Delete",
"an",
"event",
"by",
"ID",
"and",
"all",
"of",
"its",
"readings",
"404",
"-",
"Event",
"not",
"found",
"503",
"-",
"Unexpected",
"problems"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L148-L150
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
EventsForDeviceLimit
|
func (mc MongoClient) EventsForDeviceLimit(id string, limit int) ([]contract.Event, error) {
return mc.mapEvents(mc.getEventsLimit(bson.M{"device": id}, limit))
}
|
go
|
func (mc MongoClient) EventsForDeviceLimit(id string, limit int) ([]contract.Event, error) {
return mc.mapEvents(mc.getEventsLimit(bson.M{"device": id}, limit))
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"EventsForDeviceLimit",
"(",
"id",
"string",
",",
"limit",
"int",
")",
"(",
"[",
"]",
"contract",
".",
"Event",
",",
"error",
")",
"{",
"return",
"mc",
".",
"mapEvents",
"(",
"mc",
".",
"getEventsLimit",
"(",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"id",
"}",
",",
"limit",
")",
")",
"\n",
"}"
] |
// Get a list of events based on the device id and limit
|
[
"Get",
"a",
"list",
"of",
"events",
"based",
"on",
"the",
"device",
"id",
"and",
"limit"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L153-L155
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
EventsByCreationTime
|
func (mc MongoClient) EventsByCreationTime(startTime, endTime int64, limit int) ([]contract.Event, error) {
query := bson.M{"created": bson.M{
"$gte": startTime,
"$lte": endTime,
}}
return mc.mapEvents(mc.getEventsLimit(query, limit))
}
|
go
|
func (mc MongoClient) EventsByCreationTime(startTime, endTime int64, limit int) ([]contract.Event, error) {
query := bson.M{"created": bson.M{
"$gte": startTime,
"$lte": endTime,
}}
return mc.mapEvents(mc.getEventsLimit(query, limit))
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"EventsByCreationTime",
"(",
"startTime",
",",
"endTime",
"int64",
",",
"limit",
"int",
")",
"(",
"[",
"]",
"contract",
".",
"Event",
",",
"error",
")",
"{",
"query",
":=",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"startTime",
",",
"\"",
"\"",
":",
"endTime",
",",
"}",
"}",
"\n",
"return",
"mc",
".",
"mapEvents",
"(",
"mc",
".",
"getEventsLimit",
"(",
"query",
",",
"limit",
")",
")",
"\n",
"}"
] |
// Return a list of events whose creation time is between startTime and endTime
// Limit the number of results by limit
|
[
"Return",
"a",
"list",
"of",
"events",
"whose",
"creation",
"time",
"is",
"between",
"startTime",
"and",
"endTime",
"Limit",
"the",
"number",
"of",
"results",
"by",
"limit"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L164-L170
|
train
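A caller-side fragment for a one-hour query window over the method above. It assumes, as is common in EdgeX, that created timestamps are epoch milliseconds; adjust the conversion if your deployment stores a different resolution. An initialized MongoClient value mc is assumed to be in scope.

// fragment with an existing MongoClient value mc
end := time.Now().UnixNano() / int64(time.Millisecond)
start := end - int64(time.Hour/time.Millisecond)

events, err := mc.EventsByCreationTime(start, end, 100) // at most 100 events from the last hour
if err != nil {
	// handle database error
}
_ = events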
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
EventsPushed
|
func (mc MongoClient) EventsPushed() ([]contract.Event, error) {
return mc.mapEvents(mc.getEvents(bson.M{"pushed": bson.M{"$gt": int64(0)}}))
}
|
go
|
func (mc MongoClient) EventsPushed() ([]contract.Event, error) {
return mc.mapEvents(mc.getEvents(bson.M{"pushed": bson.M{"$gt": int64(0)}}))
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"EventsPushed",
"(",
")",
"(",
"[",
"]",
"contract",
".",
"Event",
",",
"error",
")",
"{",
"return",
"mc",
".",
"mapEvents",
"(",
"mc",
".",
"getEvents",
"(",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"bson",
".",
"M",
"{",
"\"",
"\"",
":",
"int64",
"(",
"0",
")",
"}",
"}",
")",
")",
"\n",
"}"
] |
// Get all of the events that have been pushed
|
[
"Get",
"all",
"of",
"the",
"events",
"that",
"have",
"been",
"pushed"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L179-L181
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
ScrubAllEvents
|
func (mc MongoClient) ScrubAllEvents() error {
s := mc.getSessionCopy()
defer s.Close()
_, err := s.DB(mc.database.Name).C(db.ReadingsCollection).RemoveAll(nil)
if err != nil {
return errorMap(err)
}
_, err = s.DB(mc.database.Name).C(db.EventsCollection).RemoveAll(nil)
if err != nil {
return errorMap(err)
}
return nil
}
|
go
|
func (mc MongoClient) ScrubAllEvents() error {
s := mc.getSessionCopy()
defer s.Close()
_, err := s.DB(mc.database.Name).C(db.ReadingsCollection).RemoveAll(nil)
if err != nil {
return errorMap(err)
}
_, err = s.DB(mc.database.Name).C(db.EventsCollection).RemoveAll(nil)
if err != nil {
return errorMap(err)
}
return nil
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"ScrubAllEvents",
"(",
")",
"error",
"{",
"s",
":=",
"mc",
".",
"getSessionCopy",
"(",
")",
"\n",
"defer",
"s",
".",
"Close",
"(",
")",
"\n\n",
"_",
",",
"err",
":=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"ReadingsCollection",
")",
".",
"RemoveAll",
"(",
"nil",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"_",
",",
"err",
"=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"EventsCollection",
")",
".",
"RemoveAll",
"(",
"nil",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] |
// Delete all of the readings and all of the events
|
[
"Delete",
"all",
"of",
"the",
"readings",
"and",
"all",
"of",
"the",
"events"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L184-L199
|
train
|
edgexfoundry/edgex-go
|
internal/pkg/db/mongo/data.go
|
getEvents
|
func (mc MongoClient) getEvents(q bson.M) (me []models.Event, err error) {
s := mc.getSessionCopy()
defer s.Close()
err = s.DB(mc.database.Name).C(db.EventsCollection).Find(q).All(&me)
if err != nil {
return []models.Event{}, errorMap(err)
}
return
}
|
go
|
func (mc MongoClient) getEvents(q bson.M) (me []models.Event, err error) {
s := mc.getSessionCopy()
defer s.Close()
err = s.DB(mc.database.Name).C(db.EventsCollection).Find(q).All(&me)
if err != nil {
return []models.Event{}, errorMap(err)
}
return
}
|
[
"func",
"(",
"mc",
"MongoClient",
")",
"getEvents",
"(",
"q",
"bson",
".",
"M",
")",
"(",
"me",
"[",
"]",
"models",
".",
"Event",
",",
"err",
"error",
")",
"{",
"s",
":=",
"mc",
".",
"getSessionCopy",
"(",
")",
"\n",
"defer",
"s",
".",
"Close",
"(",
")",
"\n\n",
"err",
"=",
"s",
".",
"DB",
"(",
"mc",
".",
"database",
".",
"Name",
")",
".",
"C",
"(",
"db",
".",
"EventsCollection",
")",
".",
"Find",
"(",
"q",
")",
".",
"All",
"(",
"&",
"me",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"[",
"]",
"models",
".",
"Event",
"{",
"}",
",",
"errorMap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}"
] |
// Get events for the passed query
|
[
"Get",
"events",
"for",
"the",
"passed",
"query"
] |
c67086fe10c4d34caeefffaee5379490103dd63f
|
https://github.com/edgexfoundry/edgex-go/blob/c67086fe10c4d34caeefffaee5379490103dd63f/internal/pkg/db/mongo/data.go#L202-L211
|
train
|