Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector.go +89 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector_test.go +159 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/presets.go +27 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms.go +500 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms_block.go +96 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection_reusable.go +132 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_inverted_reusable.go +140 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_map.go +167 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_replace.go +303 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set.go +54 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set_range.go +57 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/doc.go +75 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/doc.go +13 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/strategies.go +48 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/fake.wal +0 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/global_bucket_registry.go +56 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/helper_for_test.go +24 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/lazy_segment.go +274 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable.go +507 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush.go +333 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_inverted.go +261 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set.go +73 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set_range.go +68 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_metrics.go +43 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set.go +149 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range.go +98 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range_test.go +120 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_test.go +245 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_size_advisor.go +87 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_size_advisor_test.go +119 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_test.go +80 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/metrics.go +328 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mmap_vs_read_test.go +99 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mock_bucket_creator.go +129 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys.go +127 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys_test.go +174 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/rbtree/rbtree.go +176 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/recover_from_wal_integration_test.go +973 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/red_black_tree_test.go +424 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/search_segment.go +370 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment.go +683 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax.go +603 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax_test.go +47 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters.go +331 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters_test.go +642 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_cleaner_replace.go +201 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_collection_strategy.go +118 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group.go +1052 -0
- platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup.go +681 -0
.gitattributes
CHANGED
|
@@ -3134,3 +3134,4 @@ platform/dbops/binaries/build/bin/m4 filter=lfs diff=lfs merge=lfs -text
|
|
| 3134 |
platform/dbops/binaries/build/bin/bison filter=lfs diff=lfs merge=lfs -text
|
| 3135 |
platform/dbops/binaries/build/bin/flex filter=lfs diff=lfs merge=lfs -text
|
| 3136 |
platform/dbops/binaries/build/bin/flex++ filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 3134 |
platform/dbops/binaries/build/bin/bison filter=lfs diff=lfs merge=lfs -text
|
| 3135 |
platform/dbops/binaries/build/bin/flex filter=lfs diff=lfs merge=lfs -text
|
| 3136 |
platform/dbops/binaries/build/bin/flex++ filter=lfs diff=lfs merge=lfs -text
|
| 3137 |
+
platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compression_tests/fixtures/restart-from-zero-segments/1234567 filter=lfs diff=lfs merge=lfs -text
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector.go
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package stopwords
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"sync"
|
| 16 |
+
|
| 17 |
+
"github.com/weaviate/weaviate/entities/models"
|
| 18 |
+
|
| 19 |
+
"github.com/pkg/errors"
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
type StopwordDetector interface {
|
| 23 |
+
IsStopword(string) bool
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
type Detector struct {
|
| 27 |
+
sync.Mutex
|
| 28 |
+
stopwords map[string]struct{}
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
func NewDetectorFromConfig(config models.StopwordConfig) (*Detector, error) {
|
| 32 |
+
d, err := NewDetectorFromPreset(config.Preset)
|
| 33 |
+
if err != nil {
|
| 34 |
+
return nil, errors.Wrap(err, "failed to create new detector from config")
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
d.SetAdditions(config.Additions)
|
| 38 |
+
d.SetRemovals(config.Removals)
|
| 39 |
+
|
| 40 |
+
return d, nil
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
func NewDetectorFromPreset(preset string) (*Detector, error) {
|
| 44 |
+
var list []string
|
| 45 |
+
var ok bool
|
| 46 |
+
|
| 47 |
+
if preset != "" {
|
| 48 |
+
list, ok = Presets[preset]
|
| 49 |
+
if !ok {
|
| 50 |
+
return nil, errors.Errorf("preset %q not known to stopword detector", preset)
|
| 51 |
+
}
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
d := &Detector{
|
| 55 |
+
stopwords: map[string]struct{}{},
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
for _, word := range list {
|
| 59 |
+
d.stopwords[word] = struct{}{}
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
return d, nil
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
func (d *Detector) SetAdditions(additions []string) {
|
| 66 |
+
d.Lock()
|
| 67 |
+
defer d.Unlock()
|
| 68 |
+
|
| 69 |
+
for _, add := range additions {
|
| 70 |
+
d.stopwords[add] = struct{}{}
|
| 71 |
+
}
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
func (d *Detector) SetRemovals(removals []string) {
|
| 75 |
+
d.Lock()
|
| 76 |
+
defer d.Unlock()
|
| 77 |
+
|
| 78 |
+
for _, rem := range removals {
|
| 79 |
+
delete(d.stopwords, rem)
|
| 80 |
+
}
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
func (d *Detector) IsStopword(word string) bool {
|
| 84 |
+
d.Lock()
|
| 85 |
+
defer d.Unlock()
|
| 86 |
+
|
| 87 |
+
_, ok := d.stopwords[word]
|
| 88 |
+
return ok
|
| 89 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector_test.go
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package stopwords
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"testing"
|
| 16 |
+
|
| 17 |
+
"github.com/stretchr/testify/require"
|
| 18 |
+
"github.com/weaviate/weaviate/entities/models"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
func TestStopwordDetector(t *testing.T) {
|
| 22 |
+
type testcase struct {
|
| 23 |
+
cfg models.StopwordConfig
|
| 24 |
+
input []string
|
| 25 |
+
expectedCountable int
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
runTest := func(t *testing.T, tests []testcase) {
|
| 29 |
+
for _, test := range tests {
|
| 30 |
+
sd, err := NewDetectorFromConfig(test.cfg)
|
| 31 |
+
require.Nil(t, err)
|
| 32 |
+
|
| 33 |
+
var result []string
|
| 34 |
+
for _, word := range test.input {
|
| 35 |
+
if !sd.IsStopword(word) {
|
| 36 |
+
result = append(result, word)
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
require.Equal(t, test.expectedCountable, len(result))
|
| 40 |
+
}
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
t.Run("with en preset, additions", func(t *testing.T) {
|
| 44 |
+
tests := []testcase{
|
| 45 |
+
{
|
| 46 |
+
cfg: models.StopwordConfig{
|
| 47 |
+
Preset: "en",
|
| 48 |
+
Additions: []string{"dog"},
|
| 49 |
+
},
|
| 50 |
+
input: []string{"dog", "dog", "dog", "dog"},
|
| 51 |
+
expectedCountable: 0,
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
cfg: models.StopwordConfig{
|
| 55 |
+
Preset: "en",
|
| 56 |
+
Additions: []string{"dog"},
|
| 57 |
+
},
|
| 58 |
+
input: []string{"dog", "dog", "dog", "cat"},
|
| 59 |
+
expectedCountable: 1,
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
cfg: models.StopwordConfig{
|
| 63 |
+
Preset: "en",
|
| 64 |
+
Additions: []string{"dog"},
|
| 65 |
+
},
|
| 66 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 67 |
+
expectedCountable: 1,
|
| 68 |
+
},
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
runTest(t, tests)
|
| 72 |
+
})
|
| 73 |
+
|
| 74 |
+
t.Run("with no preset, additions", func(t *testing.T) {
|
| 75 |
+
tests := []testcase{
|
| 76 |
+
{
|
| 77 |
+
cfg: models.StopwordConfig{
|
| 78 |
+
Preset: "none",
|
| 79 |
+
Additions: []string{"dog"},
|
| 80 |
+
},
|
| 81 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 82 |
+
expectedCountable: 4,
|
| 83 |
+
},
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
runTest(t, tests)
|
| 87 |
+
})
|
| 88 |
+
|
| 89 |
+
t.Run("with en preset, removals", func(t *testing.T) {
|
| 90 |
+
tests := []testcase{
|
| 91 |
+
{
|
| 92 |
+
cfg: models.StopwordConfig{
|
| 93 |
+
Preset: "en",
|
| 94 |
+
Removals: []string{"a"},
|
| 95 |
+
},
|
| 96 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 97 |
+
expectedCountable: 3,
|
| 98 |
+
},
|
| 99 |
+
{
|
| 100 |
+
cfg: models.StopwordConfig{
|
| 101 |
+
Preset: "en",
|
| 102 |
+
Removals: []string{"a", "is", "the"},
|
| 103 |
+
},
|
| 104 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 105 |
+
expectedCountable: 5,
|
| 106 |
+
},
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
runTest(t, tests)
|
| 110 |
+
})
|
| 111 |
+
|
| 112 |
+
t.Run("with en preset, removals", func(t *testing.T) {
|
| 113 |
+
tests := []testcase{
|
| 114 |
+
{
|
| 115 |
+
cfg: models.StopwordConfig{
|
| 116 |
+
Preset: "en",
|
| 117 |
+
Removals: []string{"a"},
|
| 118 |
+
},
|
| 119 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 120 |
+
expectedCountable: 3,
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
cfg: models.StopwordConfig{
|
| 124 |
+
Preset: "en",
|
| 125 |
+
Removals: []string{"a", "is", "the"},
|
| 126 |
+
},
|
| 127 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 128 |
+
expectedCountable: 5,
|
| 129 |
+
},
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
runTest(t, tests)
|
| 133 |
+
})
|
| 134 |
+
|
| 135 |
+
t.Run("with en preset, additions, removals", func(t *testing.T) {
|
| 136 |
+
tests := []testcase{
|
| 137 |
+
{
|
| 138 |
+
cfg: models.StopwordConfig{
|
| 139 |
+
Preset: "en",
|
| 140 |
+
Additions: []string{"dog"},
|
| 141 |
+
Removals: []string{"a"},
|
| 142 |
+
},
|
| 143 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 144 |
+
expectedCountable: 2,
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
cfg: models.StopwordConfig{
|
| 148 |
+
Preset: "en",
|
| 149 |
+
Additions: []string{"dog", "best"},
|
| 150 |
+
Removals: []string{"a", "the", "is"},
|
| 151 |
+
},
|
| 152 |
+
input: []string{"a", "dog", "is", "the", "best"},
|
| 153 |
+
expectedCountable: 3,
|
| 154 |
+
},
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
runTest(t, tests)
|
| 158 |
+
})
|
| 159 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/presets.go
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package stopwords
|
| 13 |
+
|
| 14 |
+
const (
|
| 15 |
+
EnglishPreset = "en"
|
| 16 |
+
NoPreset = "none"
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
var Presets = map[string][]string{
|
| 20 |
+
EnglishPreset: {
|
| 21 |
+
"a", "an", "and", "are", "as", "at", "be", "but", "by", "for",
|
| 22 |
+
"if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that",
|
| 23 |
+
"the", "their", "then", "there", "these", "they", "this", "to", "was", "will",
|
| 24 |
+
"with",
|
| 25 |
+
},
|
| 26 |
+
NoPreset: {},
|
| 27 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms.go
ADDED
|
@@ -0,0 +1,500 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package terms
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"context"
|
| 16 |
+
"encoding/binary"
|
| 17 |
+
"math"
|
| 18 |
+
"sort"
|
| 19 |
+
|
| 20 |
+
"github.com/pkg/errors"
|
| 21 |
+
"github.com/weaviate/weaviate/entities/schema"
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
type DocPointerWithScore struct {
|
| 25 |
+
Id uint64
|
| 26 |
+
// A Frequency of 0 indicates a tombstone
|
| 27 |
+
Frequency float32
|
| 28 |
+
PropLength float32
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
func (d *DocPointerWithScore) FromBytes(in []byte, isTombstone bool, boost float32) error {
|
| 32 |
+
if len(in) < 12 {
|
| 33 |
+
return errors.Errorf("DocPointerWithScore: FromBytes: input too short, expected at least 12 bytes, got %d", len(in))
|
| 34 |
+
}
|
| 35 |
+
// This class is only to be used with a MapList that has fixed key and value lengths (8 and 8) for posting lists
|
| 36 |
+
// Thus, we can proceed with fixed offsets, and ignore reading the key and value lengths, at offset 0 and 10
|
| 37 |
+
// key will be at offset 2, value at offset 12
|
| 38 |
+
return d.FromKeyVal(in[2:10], in[12:], isTombstone, boost)
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
func (d *DocPointerWithScore) FromBytesInverted(in []byte, boost float32, propLen float32) error {
|
| 42 |
+
isTombstone := len(in) == 8
|
| 43 |
+
d.FromKeyVal(in[0:8], in[8:], isTombstone, boost)
|
| 44 |
+
d.PropLength = propLen
|
| 45 |
+
return nil
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
func (d *DocPointerWithScore) FromKeyVal(key []byte, value []byte, isTombstone bool, boost float32) error {
|
| 49 |
+
if len(key) != 8 {
|
| 50 |
+
return errors.Errorf("DocPointerWithScore: FromKeyVal: key length must be 8, got %d", len(key))
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
d.Id = binary.BigEndian.Uint64(key)
|
| 54 |
+
if isTombstone || len(value) < 8 { // tombstone, value length is also checked due to #4125
|
| 55 |
+
// Id and Freq are automatically set to 0
|
| 56 |
+
return nil
|
| 57 |
+
}
|
| 58 |
+
d.Frequency = math.Float32frombits(binary.LittleEndian.Uint32(value[:4])) * boost
|
| 59 |
+
d.PropLength = math.Float32frombits(binary.LittleEndian.Uint32(value[4:]))
|
| 60 |
+
return nil
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
type SortedDocPointerWithScoreMerger struct {
|
| 64 |
+
input [][]DocPointerWithScore
|
| 65 |
+
output []DocPointerWithScore
|
| 66 |
+
offsets []int
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
func NewSortedDocPointerWithScoreMerger() *SortedDocPointerWithScoreMerger {
|
| 70 |
+
return &SortedDocPointerWithScoreMerger{}
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
func (s *SortedDocPointerWithScoreMerger) init(segments [][]DocPointerWithScore) error {
|
| 74 |
+
s.input = segments
|
| 75 |
+
|
| 76 |
+
// all offset pointers initialized at 0 which is where we want to start
|
| 77 |
+
s.offsets = make([]int, len(segments))
|
| 78 |
+
|
| 79 |
+
// The maximum output is the sum of all the input segments if there are only
|
| 80 |
+
// unique keys and zero tombstones. If there are duplicate keys (i.e.
|
| 81 |
+
// updates) or tombstones, we will slice off some elements of the output
|
| 82 |
+
// later, but this way we can be sure each index will always be initialized
|
| 83 |
+
// correctly
|
| 84 |
+
maxOutput := 0
|
| 85 |
+
for _, seg := range segments {
|
| 86 |
+
maxOutput += len(seg)
|
| 87 |
+
}
|
| 88 |
+
s.output = make([]DocPointerWithScore, maxOutput)
|
| 89 |
+
|
| 90 |
+
return nil
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
func (s *SortedDocPointerWithScoreMerger) findSegmentWithLowestKey() (DocPointerWithScore, bool) {
|
| 94 |
+
bestSeg := -1
|
| 95 |
+
bestKey := uint64(0)
|
| 96 |
+
|
| 97 |
+
for segmentID := 0; segmentID < len(s.input); segmentID++ {
|
| 98 |
+
// check if a segment is already exhausted, then skip
|
| 99 |
+
if s.offsets[segmentID] >= len(s.input[segmentID]) {
|
| 100 |
+
continue
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
currKey := s.input[segmentID][s.offsets[segmentID]].Id
|
| 104 |
+
if bestSeg == -1 {
|
| 105 |
+
// first time we're running, no need to compare, just set to current
|
| 106 |
+
bestSeg = segmentID
|
| 107 |
+
bestKey = currKey
|
| 108 |
+
continue
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
if currKey > bestKey {
|
| 112 |
+
// the segment we are currently looking at has a higher key than our
|
| 113 |
+
// current best so we can completely ignore it
|
| 114 |
+
continue
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
if currKey < bestKey {
|
| 118 |
+
// the segment we are currently looking at is a better match than the
|
| 119 |
+
// previous, this means, we have found a new favorite, but the previous
|
| 120 |
+
// best will still be valid in a future round
|
| 121 |
+
bestSeg = segmentID
|
| 122 |
+
bestKey = currKey
|
| 123 |
+
continue
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
if currKey == bestKey {
|
| 127 |
+
// this the most interesting case: we are looking at a duplicate key. In
|
| 128 |
+
// this case the rightmost ("latest") segment takes precedence, however,
|
| 129 |
+
// we must make sure that the previous match gets discarded, otherwise we
|
| 130 |
+
// will find it again in the next round.
|
| 131 |
+
//
|
| 132 |
+
// We can simply increase the offset before updating the bestSeg pointer,
|
| 133 |
+
// which means we will never look at this element again
|
| 134 |
+
s.offsets[bestSeg]++
|
| 135 |
+
|
| 136 |
+
// now that the old element is discarded, we can update our pointers
|
| 137 |
+
bestSeg = segmentID
|
| 138 |
+
bestKey = currKey
|
| 139 |
+
}
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
if bestSeg == -1 {
|
| 143 |
+
// we didn't find anything, looks like we have exhausted all segments
|
| 144 |
+
return DocPointerWithScore{}, false
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
// we can now be sure that bestSeg,bestKey is the latest version of the
|
| 148 |
+
// lowest key, there is only one job left to do: increase the offset, so we
|
| 149 |
+
// never find this segment again
|
| 150 |
+
bestMatch := s.input[bestSeg][s.offsets[bestSeg]]
|
| 151 |
+
s.offsets[bestSeg]++
|
| 152 |
+
|
| 153 |
+
return bestMatch, true
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
func (s *SortedDocPointerWithScoreMerger) Do(ctx context.Context, segments [][]DocPointerWithScore) ([]DocPointerWithScore, error) {
|
| 157 |
+
if err := s.init(segments); err != nil {
|
| 158 |
+
return nil, errors.Wrap(err, "init sorted map decoder")
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
i := 0
|
| 162 |
+
for {
|
| 163 |
+
if i%100 == 0 && ctx.Err() != nil {
|
| 164 |
+
return nil, ctx.Err()
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
match, ok := s.findSegmentWithLowestKey()
|
| 168 |
+
if !ok {
|
| 169 |
+
break
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
if match.Frequency == 0 { // tombstone
|
| 173 |
+
// the latest version of this key was a tombstone, so we can ignore it
|
| 174 |
+
continue
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
s.output[i] = match
|
| 178 |
+
i++
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
return s.output[:i], nil
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
type TermInterface interface {
|
| 185 |
+
// doubles as max impact (with tf=1, the max impact would be 1*Idf), if there
|
| 186 |
+
// is a boost for a queryTerm, simply apply it here once
|
| 187 |
+
Idf() float64
|
| 188 |
+
IdPointer() uint64
|
| 189 |
+
Exhausted() bool
|
| 190 |
+
Count() int
|
| 191 |
+
QueryTermIndex() int
|
| 192 |
+
AdvanceAtLeast(minID uint64)
|
| 193 |
+
AdvanceAtLeastShallow(minID uint64)
|
| 194 |
+
Advance()
|
| 195 |
+
Score(averagePropLength float64, additionalExplanations bool) (uint64, float64, *DocPointerWithScore)
|
| 196 |
+
CurrentBlockImpact() float32
|
| 197 |
+
CurrentBlockMaxId() uint64
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
type Term struct {
|
| 201 |
+
// doubles as max impact (with tf=1, the max impact would be 1*Idf), if there
|
| 202 |
+
// is a boost for a queryTerm, simply apply it here once
|
| 203 |
+
idf float64
|
| 204 |
+
|
| 205 |
+
idPointer uint64
|
| 206 |
+
posPointer uint64
|
| 207 |
+
Data []DocPointerWithScore
|
| 208 |
+
exhausted bool
|
| 209 |
+
queryTerm string
|
| 210 |
+
queryTermIndex int
|
| 211 |
+
propertyBoost float64
|
| 212 |
+
config schema.BM25Config
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
func NewTerm(queryTerm string, queryTermIndex int, propertyBoost float32, config schema.BM25Config) *Term {
|
| 216 |
+
return &Term{
|
| 217 |
+
queryTerm: queryTerm,
|
| 218 |
+
queryTermIndex: queryTermIndex,
|
| 219 |
+
propertyBoost: float64(propertyBoost),
|
| 220 |
+
config: config,
|
| 221 |
+
}
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
func (t *Term) Score(averagePropLength float64, additionalExplanations bool) (uint64, float64, *DocPointerWithScore) {
|
| 225 |
+
pair := t.Data[t.posPointer]
|
| 226 |
+
freq := float64(pair.Frequency)
|
| 227 |
+
tf := freq / (freq + t.config.K1*(1-t.config.B+t.config.B*float64(pair.PropLength)/averagePropLength))
|
| 228 |
+
if !additionalExplanations {
|
| 229 |
+
return t.idPointer, tf * t.idf * t.propertyBoost, nil
|
| 230 |
+
}
|
| 231 |
+
return t.idPointer, tf * t.idf * t.propertyBoost, &pair
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
func (t *Term) Advance() {
|
| 235 |
+
t.posPointer++
|
| 236 |
+
if t.posPointer >= uint64(len(t.Data)) {
|
| 237 |
+
t.exhausted = true
|
| 238 |
+
t.idPointer = math.MaxUint64 // force them to the end of the term list
|
| 239 |
+
} else {
|
| 240 |
+
t.idPointer = t.Data[t.posPointer].Id
|
| 241 |
+
}
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
func (t *Term) AdvanceAtLeast(minID uint64) {
|
| 245 |
+
for t.idPointer < minID {
|
| 246 |
+
t.posPointer++
|
| 247 |
+
if t.posPointer >= uint64(len(t.Data)) {
|
| 248 |
+
t.exhausted = true
|
| 249 |
+
t.idPointer = math.MaxUint64 // force them to the end of the term list
|
| 250 |
+
return
|
| 251 |
+
}
|
| 252 |
+
t.idPointer = t.Data[t.posPointer].Id
|
| 253 |
+
}
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
func (t *Term) AdvanceAtLeastShallow(minID uint64) {
|
| 257 |
+
t.AdvanceAtLeast(minID)
|
| 258 |
+
// go back one document, as the advance blockmax implementation relies on going to the document right before on a shallow advance,
|
| 259 |
+
// due to the way decoding works in the SegmentBlockMax implementation
|
| 260 |
+
t.posPointer--
|
| 261 |
+
t.exhausted = false
|
| 262 |
+
t.idPointer = t.Data[t.posPointer].Id
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
func (t *Term) Count() int {
|
| 266 |
+
return len(t.Data)
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
func (t *Term) Idf() float64 {
|
| 270 |
+
return t.idf
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
func (t *Term) IdPointer() uint64 {
|
| 274 |
+
return t.idPointer
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
func (t *Term) PosPointer() uint64 {
|
| 278 |
+
return t.posPointer
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
func (t *Term) Exhausted() bool {
|
| 282 |
+
return t.exhausted
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
func (t *Term) QueryTerm() string {
|
| 286 |
+
return t.queryTerm
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
func (t *Term) QueryTermIndex() int {
|
| 290 |
+
return t.queryTermIndex
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
func (t *Term) SetIdf(idf float64) {
|
| 294 |
+
t.idf = idf
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
func (t *Term) SetPosPointer(posPointer uint64) {
|
| 298 |
+
t.posPointer = posPointer
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
func (t *Term) SetIdPointer(idPointer uint64) {
|
| 302 |
+
t.idPointer = idPointer
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
func (t *Term) CurrentBlockImpact() float32 {
|
| 306 |
+
return float32(t.idf * t.propertyBoost)
|
| 307 |
+
}
|
| 308 |
+
|
| 309 |
+
func (t *Term) CurrentBlockMaxId() uint64 {
|
| 310 |
+
return t.idPointer
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
type Terms struct {
|
| 314 |
+
T []TermInterface
|
| 315 |
+
Count int
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
func (t *Terms) CompletelyExhausted() bool {
|
| 319 |
+
for i := range t.T {
|
| 320 |
+
if !t.T[i].Exhausted() {
|
| 321 |
+
return false
|
| 322 |
+
}
|
| 323 |
+
}
|
| 324 |
+
return true
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
func (t *Terms) FindMinIDWand(minScore float64) (uint64, int, bool) {
|
| 328 |
+
cumScore := float64(0)
|
| 329 |
+
|
| 330 |
+
for i, term := range t.T {
|
| 331 |
+
if term.Exhausted() {
|
| 332 |
+
continue
|
| 333 |
+
}
|
| 334 |
+
cumScore += term.Idf()
|
| 335 |
+
if cumScore >= minScore {
|
| 336 |
+
return term.IdPointer(), i, false
|
| 337 |
+
}
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
return 0, 0, true
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
// Pivot performs the WAND pivoting step: it locates the pivot term for
// minScore, advances all preceding lists to the pivot's doc id, and then
// restores the position-0 ordering invariant. It returns true when the
// remaining terms can no longer reach minScore (scoring may stop).
func (t *Terms) Pivot(minScore float64) bool {
	minID, pivotPoint, abort := t.FindMinIDWand(minScore)
	if abort {
		return true
	}
	if pivotPoint == 0 {
		// the first list already sits at the pivot id; nothing to advance
		return false
	}

	t.AdvanceAllAtLeast(minID, len(t.T)-1)

	// we don't need to sort the entire list, just the first pivotPoint elements
	t.SortFirst()

	return false
}
|
| 359 |
+
|
| 360 |
+
func (t *Terms) AdvanceAllAtLeast(minID uint64, pivot int) {
|
| 361 |
+
for i := range t.T[:pivot] {
|
| 362 |
+
t.T[i].AdvanceAtLeast(minID)
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
// FindMinID is the block-max variant of FindMinIDWand: it accumulates each
// non-exhausted term's CurrentBlockImpact (rather than the raw idf) until
// minScore is reached. Once the threshold is crossed, the pivot is extended
// over any subsequent terms that currently point at the same doc id, and
// the (extended) pivot's doc id and index are returned. The final return
// value is true when the remaining terms cannot reach minScore.
func (t *Terms) FindMinID(minScore float64) (uint64, int, bool) {
	cumScore := float64(0)
	for i, term := range t.T {
		if term.Exhausted() {
			continue
		}
		cumScore += float64(term.CurrentBlockImpact())
		if cumScore >= minScore {
			// find if there is another term with the same id
			for j := i + 1; j < len(t.T); j++ {
				if t.T[j].IdPointer() != term.IdPointer() {
					return t.T[j-1].IdPointer(), j - 1, false
				}
			}
			// every remaining term shares the pivot id
			return t.T[len(t.T)-1].IdPointer(), len(t.T) - 1, false
		}
	}

	return 0, 0, true
}
|
| 386 |
+
|
| 387 |
+
func (t *Terms) FindFirstNonExhausted() (int, bool) {
|
| 388 |
+
for i := range t.T {
|
| 389 |
+
if !t.T[i].Exhausted() {
|
| 390 |
+
return i, true
|
| 391 |
+
}
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
return -1, false
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
// ScoreNext scores the next candidate document: it takes the current doc id
// of the first non-exhausted term, sums the score of every term positioned
// on that id, and advances those terms past it.
//
// It returns the doc id, the accumulated score, optional per-term
// explanations (indexed by QueryTermIndex, populated only when
// additionalExplanations is true), and false when nothing is left to score
// or fewer than minimumOrTokensMatch terms matched the id.
//
// NOTE(review): correctness relies on t.T being sorted by id pointer so
// that all matches of the lowest id form a contiguous run starting at pos —
// confirm callers maintain this invariant via SortFirst/SortPartial.
func (t *Terms) ScoreNext(averagePropLength float64, additionalExplanations bool, minimumOrTokensMatch int) (uint64, float64, []*DocPointerWithScore, bool) {
	var docInfos []*DocPointerWithScore

	pos, ok := t.FindFirstNonExhausted()
	if !ok {
		// done, nothing left to score
		return 0, 0, docInfos, false
	}

	// defensive check; unreachable in practice, as an empty T already
	// returned !ok above
	if len(t.T) == 0 {
		return 0, 0, docInfos, false
	}

	if additionalExplanations {
		docInfos = make([]*DocPointerWithScore, t.Count)
	}

	id := t.T[pos].IdPointer()
	var cumScore float64

	matchedTerms := 0

	// too few candidate lists remain to possibly satisfy the minimum
	if len(t.T)-pos < minimumOrTokensMatch {
		return 0, 0, docInfos, false
	}

	for i := pos; i < len(t.T); i++ {
		if t.T[i].IdPointer() != id || t.T[i].Exhausted() {
			continue
		}
		matchedTerms++
		term := t.T[i]
		_, score, docInfo := term.Score(averagePropLength, additionalExplanations)
		term.Advance()
		if additionalExplanations {
			docInfos[term.QueryTermIndex()] = docInfo
		}
		cumScore += score
	}

	if matchedTerms < minimumOrTokensMatch {
		// not enough terms matched, return 0
		return 0, 0, docInfos, false
	}

	// t.FullSort()
	return id, cumScore, docInfos, true
}
|
| 445 |
+
|
| 446 |
+
// provide sort interface
|
| 447 |
+
// Len implements sort.Interface.
func (t *Terms) Len() int {
	return len(t.T)
}

// Less implements sort.Interface; terms are ordered by their current doc
// id pointer.
func (t *Terms) Less(i, j int) bool {
	return t.T[i].IdPointer() < t.T[j].IdPointer()
}

// Swap implements sort.Interface.
func (t *Terms) Swap(i, j int) {
	t.T[i], t.T[j] = t.T[j], t.T[i]
}

// SortFull sorts all terms by their current doc id pointer.
func (t *Terms) SortFull() {
	sort.Sort(t)
}
|
| 462 |
+
|
| 463 |
+
func (t *Terms) SortFirst() {
|
| 464 |
+
min := uint64(0)
|
| 465 |
+
minIndex := -1
|
| 466 |
+
for i := 0; i < len(t.T); i++ {
|
| 467 |
+
if minIndex == -1 || (t.T[i].IdPointer() < min && !t.T[i].Exhausted()) {
|
| 468 |
+
min = t.T[i].IdPointer()
|
| 469 |
+
minIndex = i
|
| 470 |
+
}
|
| 471 |
+
}
|
| 472 |
+
if minIndex > 0 {
|
| 473 |
+
t.T[0], t.T[minIndex] = t.T[minIndex], t.T[0]
|
| 474 |
+
}
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
func (t *Terms) SortPartial(nextList int) {
|
| 478 |
+
for i := nextList + 1; i < len(t.T); i++ {
|
| 479 |
+
if t.T[i].IdPointer() <= t.T[i-1].IdPointer() {
|
| 480 |
+
// swap
|
| 481 |
+
t.T[i], t.T[i-1] = t.T[i-1], t.T[i]
|
| 482 |
+
} else {
|
| 483 |
+
break
|
| 484 |
+
}
|
| 485 |
+
}
|
| 486 |
+
}
|
| 487 |
+
|
| 488 |
+
// GetBlockUpperBound sums the block-level impact upper bounds of the first
// pivot+1 terms. Any term whose current block ends before pivotId is
// shallow-advanced first so its block metadata covers the block that could
// contain pivotId. Exhausted terms contribute nothing.
func (t *Terms) GetBlockUpperBound(pivot int, pivotId uint64) float32 {
	blockMaxScore := float32(0)
	for i := 0; i < pivot+1; i++ {
		if t.T[i].Exhausted() {
			continue
		}
		if t.T[i].CurrentBlockMaxId() < pivotId {
			t.T[i].AdvanceAtLeastShallow(pivotId)
		}
		blockMaxScore += t.T[i].CurrentBlockImpact()
	}
	return blockMaxScore
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms_block.go
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package terms
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
var (
	// BLOCK_SIZE is the number of postings stored per encoded block.
	// NOTE(review): these are mutable package-level vars referenced from
	// other packages (e.g. the lsmkv inverted cursor); idiomatic Go would
	// use MixedCaps constants, but renaming would break external callers.
	BLOCK_SIZE = 128
	// if we are only encoding few documents, we can encode the doc ids and tfs as full bytes.
	// Limit for this is currently set to 1
	ENCODE_AS_FULL_BYTES = 1
)
|
| 24 |
+
|
| 25 |
+
// BlockEntry is the fixed-size directory entry for one block of a posting
// list: the highest doc id contained in the block, the byte offset of the
// block's data, and the (tf, propLength) pair that produces the block's
// maximum impact score.
type BlockEntry struct {
	MaxId               uint64
	Offset              uint32
	MaxImpactTf         uint32
	MaxImpactPropLength uint32
}

// blockEntrySize is the exact encoded size of a BlockEntry:
// 8 (MaxId) + 4 (Offset) + 4 (MaxImpactTf) + 4 (MaxImpactPropLength).
// Extracted so Size, Encode and DecodeBlockEntry cannot drift apart.
const blockEntrySize = 20

// Size returns the number of bytes Encode produces.
func (b BlockEntry) Size() int {
	return blockEntrySize
}

// Encode serializes the entry into a fresh little-endian byte slice of
// exactly Size() bytes. Field order is MaxId, Offset, MaxImpactTf,
// MaxImpactPropLength and must match DecodeBlockEntry.
func (b *BlockEntry) Encode() []byte {
	out := make([]byte, blockEntrySize)
	binary.LittleEndian.PutUint64(out, b.MaxId)
	binary.LittleEndian.PutUint32(out[8:], b.Offset)
	binary.LittleEndian.PutUint32(out[12:], b.MaxImpactTf)
	binary.LittleEndian.PutUint32(out[16:], b.MaxImpactPropLength)
	return out
}

// DecodeBlockEntry parses a BlockEntry previously written by Encode. data
// must hold at least blockEntrySize bytes; extra bytes are ignored.
func DecodeBlockEntry(data []byte) *BlockEntry {
	return &BlockEntry{
		MaxId:               binary.LittleEndian.Uint64(data),
		Offset:              binary.LittleEndian.Uint32(data[8:]),
		MaxImpactTf:         binary.LittleEndian.Uint32(data[12:]),
		MaxImpactPropLength: binary.LittleEndian.Uint32(data[16:]),
	}
}
|
| 53 |
+
|
| 54 |
+
// BlockDataDecoded holds a fully decoded block: parallel slices of doc ids
// and their term frequencies.
type BlockDataDecoded struct {
	DocIds []uint64
	Tfs    []uint64
}

// BlockData holds one block's still-encoded doc ids and term frequencies.
// The two byte slices are opaque at this level; their contents are decoded
// elsewhere.
type BlockData struct {
	DocIds []byte
	Tfs    []byte
}

// Size returns the number of bytes Encode produces: two uint16 length
// prefixes followed by both payloads.
func (b *BlockData) Size() int {
	return 2*2 + len(b.DocIds) + len(b.Tfs)
}

// Encode serializes the block as:
//
//	uint16 LE len(DocIds) | uint16 LE len(Tfs) | DocIds bytes | Tfs bytes
//
// The buffer is sized via Size() so the two cannot drift apart. Payload
// lengths must fit in a uint16 (blocks are bounded by BLOCK_SIZE postings,
// far below that limit).
func (b *BlockData) Encode() []byte {
	out := make([]byte, b.Size())
	offset := 0
	// write the lengths of the slices
	binary.LittleEndian.PutUint16(out[offset:], uint16(len(b.DocIds)))
	offset += 2
	binary.LittleEndian.PutUint16(out[offset:], uint16(len(b.Tfs)))
	offset += 2

	offset += copy(out[offset:], b.DocIds)
	copy(out[offset:], b.Tfs)
	return out
}

// DecodeBlockData parses a block written by Encode. The returned slices
// alias data; they are views, not copies.
func DecodeBlockData(data []byte) *BlockData {
	docIdsLen := binary.LittleEndian.Uint16(data)
	termFreqsLen := binary.LittleEndian.Uint16(data[2:])
	return &BlockData{
		DocIds: data[4 : 4+docIdsLen],
		Tfs:    data[4+docIdsLen : 4+docIdsLen+termFreqsLen],
	}
}

// DecodeBlockDataReusable is the allocation-free variant of
// DecodeBlockData: it fills the caller-provided out with views into data.
func DecodeBlockDataReusable(data []byte, out *BlockData) {
	docIdsLen := binary.LittleEndian.Uint16(data)
	termFreqsLen := binary.LittleEndian.Uint16(data[2:])
	out.DocIds = data[4 : 4+docIdsLen]
	out.Tfs = data[4+docIdsLen : 4+docIdsLen+termFreqsLen]
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection_reusable.go
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"errors"
|
| 16 |
+
"io"
|
| 17 |
+
|
| 18 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
// segmentCursorCollectionReusable iterates sequentially over all collection
// nodes of a segment, reading through a small read-ahead cache and reusing
// one node buffer across calls. The returned key/values therefore remain
// valid only until the next call.
type segmentCursorCollectionReusable struct {
	cache   *cacheReader
	nodeBuf segmentCollectionNode
}

// newCollectionCursorReusable creates a reusable sequential cursor over the
// segment's data section.
func (s *segment) newCollectionCursorReusable() *segmentCursorCollectionReusable {
	return &segmentCursorCollectionReusable{
		cache: newCacheReader(s),
	}
}
|
| 31 |
+
|
| 32 |
+
// next returns the following key/value pair, or lsmkv.NotFound once the
// end of the segment's data section has been reached.
func (s *segmentCursorCollectionReusable) next() ([]byte, []value, error) {
	if err := s.cache.CheckPosition(); err != nil {
		return nil, nil, err
	}
	return s.parseCollectionNodeInto()
}

// first rewinds the cursor to the start of the data section and returns
// the first key/value pair (or lsmkv.NotFound for an empty section).
func (s *segmentCursorCollectionReusable) first() ([]byte, []value, error) {
	s.cache.Reset()
	if err := s.cache.CheckPosition(); err != nil {
		return nil, nil, err
	}
	return s.parseCollectionNodeInto()
}

// parseCollectionNodeInto decodes the node at the cache's current position
// into the reused node buffer. On error, the primary key read so far is
// still returned so callers can report it.
func (s *segmentCursorCollectionReusable) parseCollectionNodeInto() ([]byte, []value, error) {
	err := ParseCollectionNodeInto(s.cache, &s.nodeBuf)
	if err != nil {
		return s.nodeBuf.primaryKey, nil, err
	}

	return s.nodeBuf.primaryKey, s.nodeBuf.values, nil
}
|
| 55 |
+
|
| 56 |
+
// cacheReader is an io.Reader over a segment's data section that buffers
// reads through a fixed-size window to avoid one segment read per node.
type cacheReader struct {
	readCache         []byte // current window; len = valid bytes, cap = window size
	positionInCache   uint64 // next unread byte within readCache
	segment           *segment
	positionInSegment uint64 // absolute next-read position within the segment
}

// newCacheReader creates a cacheReader positioned at the segment's data
// start. The window is 4096 bytes, shrunk to the data section size when
// that is smaller.
func newCacheReader(s *segment) *cacheReader {
	cacheSize := uint64(4096)
	if s.dataEndPos-s.dataStartPos < cacheSize {
		cacheSize = s.dataEndPos - s.dataStartPos
	}

	return &cacheReader{
		readCache:         make([]byte, 0, cacheSize),
		segment:           s,
		positionInSegment: s.dataStartPos,
	}
}
|
| 75 |
+
|
| 76 |
+
// CheckPosition returns lsmkv.NotFound once the reader has consumed the
// entire data section, nil otherwise.
func (c *cacheReader) CheckPosition() error {
	if c.positionInSegment >= c.segment.dataEndPos {
		return lsmkv.NotFound
	}
	return nil
}

// Reset rewinds the reader to the start of the data section and empties
// the cache window (keeping its capacity for reuse).
func (c *cacheReader) Reset() {
	c.positionInCache = 0
	c.positionInSegment = c.segment.dataStartPos
	c.readCache = c.readCache[:0] // forces a new read
}
|
| 88 |
+
|
| 89 |
+
// Read implements io.Reader with fill-or-fail semantics: a read that would
// cross the end of the data section returns lsmkv.NotFound, a cache miss
// triggers a refill starting at the current segment position, and success
// always reports exactly len(p) bytes.
func (c *cacheReader) Read(p []byte) (n int, err error) {
	length := uint64(len(p))
	if c.positionInSegment+length > c.segment.dataEndPos {
		return 0, lsmkv.NotFound
	}
	if c.positionInCache+length > uint64(len(c.readCache)) {
		// refill starts at positionInSegment, so any partially consumed
		// tail of the old window is simply re-read
		if err := c.loadDataIntoCache(len(p)); err != nil {
			return 0, err
		}
	}
	copy(p, c.readCache[c.positionInCache:c.positionInCache+length])

	c.positionInSegment += length
	c.positionInCache += length

	return len(p), nil
}
|
| 106 |
+
|
| 107 |
+
// loadDataIntoCache refills the window from the segment starting at the
// current segment position, growing the window when readLength exceeds its
// capacity. It returns lsmkv.NotFound when fewer than readLength bytes
// could be read (the caller's request would run past the data end).
func (c *cacheReader) loadDataIntoCache(readLength int) error {
	at, err := c.segment.newNodeReader(nodeOffset{start: c.positionInSegment}, "CursorCollectionReusable")
	if err != nil {
		return err
	}
	defer at.Release()

	// Restore the original buffer capacity before reading
	c.readCache = c.readCache[:cap(c.readCache)]

	if readLength > len(c.readCache) {
		// request exceeds the window; allocate a bigger one
		c.readCache = make([]byte, readLength)
	}

	read, err := at.Read(c.readCache)
	// a short read at the end of the segment surfaces as io.EOF with
	// read > 0; only treat it as an error when nothing was read at all
	if err != nil && (!errors.Is(err, io.EOF) || read == 0) {
		return err
	}
	if read < readLength {
		return lsmkv.NotFound
	}

	c.readCache = c.readCache[:read]
	c.positionInCache = 0
	return nil
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_inverted_reusable.go
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
|
| 17 |
+
"github.com/weaviate/weaviate/adapters/repos/db/inverted/terms"
|
| 18 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
// segmentCursorInvertedReusable iterates over the nodes of an
// inverted-strategy segment, reusing one node buffer across calls; the
// returned key/values are valid only until the next call.
type segmentCursorInvertedReusable struct {
	segment     *segment
	nextOffset  uint64 // absolute offset of the next node to parse
	nodeBuf     binarySearchNodeMap
	propLengths map[uint64]uint32 // per-doc property lengths loaded from the segment
}

// newInvertedCursorReusable creates a reusable cursor over an inverted
// segment.
//
// NOTE(review): a failure in GetPropertyLengths makes this return nil and
// drops the error; callers such as newMapCursors store the result
// unchecked, so the failure would only surface later as a nil-pointer
// dereference — consider propagating the error instead.
func (s *segment) newInvertedCursorReusable() *segmentCursorInvertedReusable {
	propLengths, err := s.GetPropertyLengths()
	if err != nil {
		return nil
	}
	return &segmentCursorInvertedReusable{
		segment:     s,
		propLengths: propLengths,
	}
}
|
| 38 |
+
|
| 39 |
+
// seek positions the cursor at the first node whose key satisfies the
// segment index's Seek semantics for key, parses it into the reused
// buffer, and sets the next offset to the end of that node.
func (s *segmentCursorInvertedReusable) seek(key []byte) ([]byte, []MapPair, error) {
	node, err := s.segment.index.Seek(key)
	if err != nil {
		return nil, nil, err
	}

	err = s.parseInvertedNodeInto(nodeOffset{node.Start, node.End})
	if err != nil {
		return nil, nil, err
	}

	// override the offset computed during parsing with the index's own
	// node end
	s.nextOffset = node.End

	return s.nodeBuf.key, s.nodeBuf.values, nil
}

// next parses the node at the current offset (parseInvertedNodeInto
// advances the offset), or returns lsmkv.NotFound at the end of the data
// section.
func (s *segmentCursorInvertedReusable) next() ([]byte, []MapPair, error) {
	if s.nextOffset >= s.segment.dataEndPos {
		return nil, nil, lsmkv.NotFound
	}

	err := s.parseInvertedNodeInto(nodeOffset{start: s.nextOffset})
	if err != nil {
		return nil, nil, err
	}

	return s.nodeBuf.key, s.nodeBuf.values, nil
}

// first rewinds to the start of the data section and returns the first
// node (or lsmkv.NotFound for an empty section).
func (s *segmentCursorInvertedReusable) first() ([]byte, []MapPair, error) {
	s.nextOffset = s.segment.dataStartPos

	if s.nextOffset >= s.segment.dataEndPos {
		return nil, nil, lsmkv.NotFound
	}

	err := s.parseInvertedNodeInto(nodeOffset{start: s.nextOffset})
	if err != nil {
		return nil, nil, err
	}
	return s.nodeBuf.key, s.nodeBuf.values, nil
}
|
| 81 |
+
|
| 82 |
+
// parseInvertedNodeInto decodes the inverted node at offset into the
// reused node buffer and advances s.nextOffset past it.
//
// Layout as read here: an 8-byte doc count; for docCount >
// ENCODE_AS_FULL_BYTES the body length sits in bytes 8..16, otherwise the
// body is a fixed 20 bytes; the body is followed by a 4-byte key length
// and the key itself. (Assumed from this reader's arithmetic — confirm
// against the inverted-segment writer.)
func (s *segmentCursorInvertedReusable) parseInvertedNodeInto(offset nodeOffset) error {
	buffer := make([]byte, 16)
	r, err := s.segment.newNodeReader(offset, "segmentCursorInvertedReusable")
	if err != nil {
		return err
	}
	defer r.Release()

	_, err = r.Read(buffer)
	if err != nil {
		return err
	}
	docCount := binary.LittleEndian.Uint64(buffer[:8])
	// tiny nodes use a fixed-size full-byte encoding; larger ones carry an
	// explicit body length
	end := uint64(20)
	if docCount > uint64(terms.ENCODE_AS_FULL_BYTES) {
		end = binary.LittleEndian.Uint64(buffer[8:16]) + 16
	}
	// +4 covers the trailing key-length field
	offset.end = offset.start + end + 4

	// re-read the whole node body now that its length is known
	r, err = s.segment.newNodeReader(offset, "segmentCursorInvertedReusable")
	if err != nil {
		return err
	}
	defer r.Release()

	allBytes := make([]byte, offset.end-offset.start)

	_, err = r.Read(allBytes)
	if err != nil {
		return err
	}

	nodes, _ := decodeAndConvertFromBlocks(allBytes)

	keyLen := binary.LittleEndian.Uint32(allBytes[len(allBytes)-4:])

	offset.start = offset.end
	offset.end += uint64(keyLen)
	key := make([]byte, keyLen)

	// empty keys are possible if using non-word tokenizers, so let's handle them
	if keyLen > 0 {
		r, err = s.segment.newNodeReader(offset, "segmentCursorInvertedReusable")
		if err != nil {
			return err
		}
		// all three readers are released together at function return
		defer r.Release()
		_, err = r.Read(key)
		if err != nil {
			return err
		}
	}
	s.nodeBuf.key = key
	s.nodeBuf.values = nodes

	s.nextOffset = offset.end

	return nil
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_map.go
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"io"
|
| 16 |
+
|
| 17 |
+
"github.com/pkg/errors"
|
| 18 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 19 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
// segmentCursorMap iterates over a map-strategy (or inverted-strategy)
// segment, decoding each node into []MapPair.
type segmentCursorMap struct {
	segment    *segment
	nextOffset uint64 // absolute offset of the next node to parse
}

// newMapCursor creates a map cursor positioned before the first node.
func (s *segment) newMapCursor() *segmentCursorMap {
	return &segmentCursorMap{
		segment: s,
	}
}
|
| 32 |
+
|
| 33 |
+
// newMapCursors returns one map-compatible cursor per segment of the group
// (a reusable inverted cursor for inverted-strategy segments, a plain map
// cursor otherwise) plus a release function that must be called to unlock
// the segment list.
func (sg *SegmentGroup) newMapCursors() ([]innerCursorMap, func()) {
	segments, release := sg.getAndLockSegments()

	out := make([]innerCursorMap, len(segments))

	for i, segment := range segments {
		sgm := segment.getSegment()
		if sgm.getStrategy() == segmentindex.StrategyInverted {
			// NOTE(review): newInvertedCursorReusable returns nil on
			// error; that nil is stored here unchecked
			out[i] = sgm.newInvertedCursorReusable()
		} else {
			out[i] = sgm.newMapCursor()
		}
	}

	return out, release
}
|
| 49 |
+
|
| 50 |
+
func (s *segmentCursorMap) decode(parsed segmentCollectionNode) ([]MapPair, error) {
|
| 51 |
+
pairs := make([]MapPair, len(parsed.values))
|
| 52 |
+
for i := range pairs {
|
| 53 |
+
if s.segment.strategy == segmentindex.StrategyInverted {
|
| 54 |
+
if err := pairs[i].FromBytesInverted(parsed.values[i].value, false); err != nil {
|
| 55 |
+
return nil, err
|
| 56 |
+
}
|
| 57 |
+
} else {
|
| 58 |
+
if err := pairs[i].FromBytes(parsed.values[i].value, false); err != nil {
|
| 59 |
+
return nil, err
|
| 60 |
+
}
|
| 61 |
+
}
|
| 62 |
+
pairs[i].Tombstone = parsed.values[i].tombstone
|
| 63 |
+
}
|
| 64 |
+
return pairs, nil
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
// seek positions the cursor at the node the segment index returns for key
// and yields its decoded pairs. The next offset is advanced even when
// parsing errs, so a 'Deleted' node cannot stall iteration.
func (s *segmentCursorMap) seek(key []byte) ([]byte, []MapPair, error) {
	node, err := s.segment.index.Seek(key)
	if err != nil {
		return nil, nil, err
	}

	var parsed segmentCollectionNode

	if s.segment.strategy == segmentindex.StrategyInverted {
		parsed, err = s.parseInvertedNode(nodeOffset{node.Start, node.End})
	} else {
		parsed, err = s.parseCollectionNode(nodeOffset{node.Start, node.End})
	}
	// make sure to set the next offset before checking the error. The error
	// could be 'Deleted' which would require that the offset is still advanced
	// for the next cycle
	s.nextOffset = node.End
	if err != nil {
		return parsed.primaryKey, nil, err
	}

	pairs, err := s.decode(parsed)
	return parsed.primaryKey, pairs, err
}

// next parses the node at the current offset and advances past it, or
// returns lsmkv.NotFound at the end of the data section.
func (s *segmentCursorMap) next() ([]byte, []MapPair, error) {
	if s.nextOffset >= s.segment.dataEndPos {
		return nil, nil, lsmkv.NotFound
	}

	var parsed segmentCollectionNode
	var err error

	if s.segment.strategy == segmentindex.StrategyInverted {
		parsed, err = s.parseInvertedNode(nodeOffset{start: s.nextOffset})
	} else {
		parsed, err = s.parseCollectionNode(nodeOffset{start: s.nextOffset})
	}
	// make sure to set the next offset before checking the error. The error
	// could be 'Deleted' which would require that the offset is still advanced
	// for the next cycle
	s.nextOffset = s.nextOffset + uint64(parsed.offset)
	if err != nil {
		return parsed.primaryKey, nil, err
	}

	pairs, err := s.decode(parsed)
	return parsed.primaryKey, pairs, err
}

// first rewinds to the start of the data section and returns the first
// node. An io.EOF while parsing is mapped to lsmkv.NotFound because
// compaction can legitimately produce an empty map.
func (s *segmentCursorMap) first() ([]byte, []MapPair, error) {
	if s.segment.dataStartPos == s.segment.dataEndPos {
		return nil, nil, lsmkv.NotFound
	}

	s.nextOffset = s.segment.dataStartPos

	var parsed segmentCollectionNode
	var err error

	if s.segment.strategy == segmentindex.StrategyInverted {
		parsed, err = s.parseInvertedNode(nodeOffset{start: s.nextOffset})
	} else {
		parsed, err = s.parseCollectionNode(nodeOffset{start: s.nextOffset})
	}
	// make sure to set the next offset before checking the error. The error
	// could be 'Deleted' which would require that the offset is still advanced
	// for the next cycle
	s.nextOffset = s.nextOffset + uint64(parsed.offset)
	if err != nil {
		if errors.Is(err, io.EOF) {
			// an empty map could have been generated due to an issue in compaction
			return nil, nil, lsmkv.NotFound
		}

		return parsed.primaryKey, nil, err
	}

	pairs, err := s.decode(parsed)
	return parsed.primaryKey, pairs, err
}
|
| 148 |
+
|
| 149 |
+
// parseCollectionNode reads and parses the collection node at offset.
func (s *segmentCursorMap) parseCollectionNode(offset nodeOffset) (segmentCollectionNode, error) {
	r, err := s.segment.newNodeReader(offset, "segmentCursorMap")
	if err != nil {
		return segmentCollectionNode{}, err
	}
	defer r.Release()

	return ParseCollectionNode(r)
}

// parseInvertedNode reads and parses the inverted node at offset into the
// same segmentCollectionNode shape, so callers can treat both strategies
// uniformly.
func (s *segmentCursorMap) parseInvertedNode(offset nodeOffset) (segmentCollectionNode, error) {
	r, err := s.segment.newNodeReader(offset, "segmentCursorMap")
	if err != nil {
		return segmentCollectionNode{}, err
	}
	defer r.Release()

	return ParseInvertedNode(r)
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_replace.go
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 16 |
+
"github.com/weaviate/weaviate/usecases/byteops"
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
// segmentCursorReplace iterates over the nodes of a replace-strategy
// segment. Key extraction and offset computation are pluggable via the
// function fields, so the same type supports both a sequential
// primary-key scan and iteration via a secondary index.
type segmentCursorReplace struct {
	segment       *segment
	index         diskIndex
	keyFn         func(n *segmentReplaceNode) []byte          // extracts the key this cursor iterates by
	firstOffsetFn func() (uint64, error)                      // offset of the first node
	nextOffsetFn  func(n *segmentReplaceNode) (uint64, error) // offset of the node following n
	currOffset    uint64
	reusableNode  *segmentReplaceNode // node buffer reused across reads
	reusableBORW  byteops.ReadWriter  // reusable byte read/writer
}
|
| 29 |
+
|
| 30 |
+
// newCursor creates a primary-key cursor over the segment: nodes are
// visited in storage order, each node's offset being the current offset
// plus the current node's encoded length.
func (s *segment) newCursor() *segmentCursorReplace {
	cursor := &segmentCursorReplace{
		segment: s,
		index:   s.index,
		firstOffsetFn: func() (uint64, error) {
			// an empty data section has no first node
			if s.dataStartPos == s.dataEndPos {
				return 0, lsmkv.NotFound
			}
			return s.dataStartPos, nil
		},
		currOffset: s.dataStartPos,
		keyFn: func(n *segmentReplaceNode) []byte {
			return n.primaryKey
		},
		reusableNode: &segmentReplaceNode{},
		reusableBORW: byteops.NewReadWriter(nil),
	}

	// assigned after construction because the closure needs to capture the
	// cursor itself to read currOffset: sequential scan — the next node
	// starts right after the current one
	cursor.nextOffsetFn = func(n *segmentReplaceNode) (uint64, error) {
		return cursor.currOffset + uint64(n.offset), nil
	}

	return cursor
}
|
| 54 |
+
|
| 55 |
+
// Note: scanning over secondary keys is sub-optimal
// i.e. no sequential scan is possible as when scanning over the primary key

// newCursorWithSecondaryIndex creates a cursor that iterates in the order
// of the secondary index at position pos. Each step performs an index
// lookup (Seek/Next on the secondary key) instead of a sequential read.
func (s *segment) newCursorWithSecondaryIndex(pos int) *segmentCursorReplace {
	return &segmentCursorReplace{
		segment: s,
		index:   s.secondaryIndices[pos],
		keyFn: func(n *segmentReplaceNode) []byte {
			return n.secondaryKeys[pos]
		},
		firstOffsetFn: func() (uint64, error) {
			// Seek(nil) yields the index's smallest entry, i.e. the first node
			index := s.secondaryIndices[pos]
			n, err := index.Seek(nil)
			if err != nil {
				return 0, err
			}
			return n.Start, nil
		},
		nextOffsetFn: func(n *segmentReplaceNode) (uint64, error) {
			// look up the successor of the current node's secondary key
			index := s.secondaryIndices[pos]
			next, err := index.Next(n.secondaryKeys[pos])
			if err != nil {
				return 0, err
			}
			return next.Start, nil
		},
		reusableNode: &segmentReplaceNode{
			secondaryIndexCount: s.secondaryIndexCount,
			secondaryKeys:       make([][]byte, s.secondaryIndexCount),
		},
		reusableBORW: byteops.NewReadWriter(nil),
	}
}
|
| 88 |
+
|
| 89 |
+
func (sg *SegmentGroup) newCursors() ([]innerCursorReplace, func()) {
|
| 90 |
+
segments, release := sg.getAndLockSegments()
|
| 91 |
+
|
| 92 |
+
out := make([]innerCursorReplace, len(segments))
|
| 93 |
+
|
| 94 |
+
for i, segment := range segments {
|
| 95 |
+
out[i] = segment.newCursor()
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
return out, release
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
// newCursorsWithFlushingSupport returns cursors over both the regular
// segments and any enqueued segments (flushed while cursors were open, not
// yet integrated). The maintenance read-lock is held until the returned
// release function is called, so the segment list cannot change underneath
// the caller. release must be called exactly once per call to this method.
func (sg *SegmentGroup) newCursorsWithFlushingSupport() ([]innerCursorReplace, func()) {
	sg.cursorsLock.Lock()
	defer sg.cursorsLock.Unlock()

	sg.activeCursors++

	// intentionally NOT released in this function — held until release()
	sg.maintenanceLock.RLock()

	var segments []Segment

	if len(sg.enqueuedSegments) == 0 {
		segments = sg.segments
	} else {
		// view both regular and enqueued segments without mutating either list
		segments = make([]Segment, 0, len(sg.segments)+len(sg.enqueuedSegments))
		segments = append(segments, sg.segments...)
		segments = append(segments, sg.enqueuedSegments...)
	}

	out := make([]innerCursorReplace, 0, len(segments))

	for _, segment := range segments {
		out = append(out, segment.newCursor())
	}

	release := func() {
		sg.maintenanceLock.RUnlock()

		sg.cursorsLock.Lock()
		defer sg.cursorsLock.Unlock()

		sg.activeCursors--

		// the last cursor to be released integrates enqueued segments into
		// the regular segment list
		if sg.activeCursors == 0 && len(sg.enqueuedSegments) > 0 {
			sg.maintenanceLock.Lock()
			defer sg.maintenanceLock.Unlock()

			sg.segments = append(sg.segments, sg.enqueuedSegments...)
			sg.enqueuedSegments = nil
		}
	}

	return out, release
}
|
| 144 |
+
|
| 145 |
+
func (sg *SegmentGroup) newCursorsWithSecondaryIndex(pos int) ([]innerCursorReplace, func()) {
|
| 146 |
+
segments, release := sg.getAndLockSegments()
|
| 147 |
+
out := make([]innerCursorReplace, 0, len(segments))
|
| 148 |
+
|
| 149 |
+
for _, segment := range segments {
|
| 150 |
+
if int(segment.getSecondaryIndexCount()) <= pos {
|
| 151 |
+
continue
|
| 152 |
+
}
|
| 153 |
+
out = append(out, segment.newCursorWithSecondaryIndex(pos))
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
return out, release
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
func (s *segmentCursorReplace) seek(key []byte) ([]byte, []byte, error) {
|
| 160 |
+
node, err := s.index.Seek(key)
|
| 161 |
+
if err != nil {
|
| 162 |
+
return nil, nil, err
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
s.currOffset = node.Start
|
| 166 |
+
|
| 167 |
+
err = s.parseReplaceNodeInto(nodeOffset{start: node.Start, end: node.End},
|
| 168 |
+
s.segment.contents[node.Start:node.End])
|
| 169 |
+
if err != nil {
|
| 170 |
+
return s.keyFn(s.reusableNode), nil, err
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
return s.keyFn(s.reusableNode), s.reusableNode.value, nil
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
// next advances the cursor to the following node and returns its key and
// value. It returns lsmkv.NotFound once the cursor would move past the data
// section. On a parse error (including lsmkv.Deleted for tombstoned nodes)
// the key is still returned so callers can continue iterating.
func (s *segmentCursorReplace) next() ([]byte, []byte, error) {
	nextOffset, err := s.nextOffsetFn(s.reusableNode)
	if err != nil {
		return nil, nil, err
	}

	if nextOffset >= s.segment.dataEndPos {
		return nil, nil, lsmkv.NotFound
	}

	s.currOffset = nextOffset

	// the end offset is left open — presumably the parser determines the
	// node length itself (TODO confirm against parseReplaceNodeInto)
	err = s.parseReplaceNodeInto(nodeOffset{start: s.currOffset},
		s.segment.contents[s.currOffset:])
	if err != nil {
		return s.keyFn(s.reusableNode), nil, err
	}

	return s.keyFn(s.reusableNode), s.reusableNode.value, nil
}
|
| 196 |
+
|
| 197 |
+
func (s *segmentCursorReplace) first() ([]byte, []byte, error) {
|
| 198 |
+
firstOffset, err := s.firstOffsetFn()
|
| 199 |
+
if err != nil {
|
| 200 |
+
return nil, nil, err
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
s.currOffset = firstOffset
|
| 204 |
+
|
| 205 |
+
err = s.parseReplaceNodeInto(nodeOffset{start: s.currOffset},
|
| 206 |
+
s.segment.contents[s.currOffset:])
|
| 207 |
+
if err != nil {
|
| 208 |
+
return s.keyFn(s.reusableNode), nil, err
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
return s.keyFn(s.reusableNode), s.reusableNode.value, nil
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
// nextWithAllKeys advances the cursor and returns a full copy of the node,
// including all secondary keys (unlike next, which surfaces only the cursor
// key). Returns lsmkv.NotFound past the end of the data section.
func (s *segmentCursorReplace) nextWithAllKeys() (n segmentReplaceNode, err error) {
	nextOffset, err := s.nextOffsetFn(s.reusableNode)
	if err != nil {
		return n, err
	}

	if nextOffset >= s.segment.dataEndPos {
		return n, lsmkv.NotFound
	}

	s.currOffset = nextOffset

	n, err = s.parseReplaceNode(nodeOffset{start: s.currOffset})

	// point reusableNode at the freshly parsed node so the next call's
	// nextOffsetFn sees this node's state
	s.reusableNode = &n

	return n, err
}
|
| 232 |
+
|
| 233 |
+
// firstWithAllKeys rewinds the cursor and returns a full copy of the first
// node, including all secondary keys.
func (s *segmentCursorReplace) firstWithAllKeys() (n segmentReplaceNode, err error) {
	firstOffset, err := s.firstOffsetFn()
	if err != nil {
		return n, err
	}

	s.currOffset = firstOffset

	n, err = s.parseReplaceNode(nodeOffset{start: s.currOffset})

	// point reusableNode at the freshly parsed node so subsequent
	// next/nextWithAllKeys calls advance from here
	s.reusableNode = &n

	return n, err
}
|
| 247 |
+
|
| 248 |
+
// parseReplaceNode reads and parses a complete replace node (value plus all
// secondary keys) at the given offset. Tombstoned nodes are reported as
// lsmkv.Deleted alongside the parsed node.
func (s *segmentCursorReplace) parseReplaceNode(offset nodeOffset) (segmentReplaceNode, error) {
	r, err := s.segment.newNodeReader(offset, "segmentCursorReplace")
	if err != nil {
		return segmentReplaceNode{}, err
	}
	defer r.Release()

	out, err := ParseReplaceNode(r, s.segment.secondaryIndexCount)
	// NOTE(review): the tombstone check runs before err is inspected, so a
	// parse error on a node whose tombstone flag was read as true is masked
	// by lsmkv.Deleted — confirm this precedence is intended.
	if out.tombstone {
		return out, lsmkv.Deleted
	}
	return out, err
}
|
| 261 |
+
|
| 262 |
+
// parseReplaceNodeInto parses the node at offset into s.reusableNode. For
// memory-backed segments the provided buf is parsed directly; otherwise the
// node is read via pread. Tombstoned nodes are reported as lsmkv.Deleted.
func (s *segmentCursorReplace) parseReplaceNodeInto(offset nodeOffset, buf []byte) error {
	if s.segment.readFromMemory {
		return s.parse(buf)
	}

	r, err := s.segment.newNodeReader(offset, "segmentCursorReplace")
	if err != nil {
		return err
	}
	defer r.Release()

	err = ParseReplaceNodeIntoPread(r, s.segment.secondaryIndexCount, s.reusableNode)
	if err != nil {
		return err
	}

	// surface tombstones as a sentinel error rather than a normal result
	if s.reusableNode.tombstone {
		return lsmkv.Deleted
	}

	return nil
}
|
| 284 |
+
|
| 285 |
+
func (s *segmentCursorReplace) parse(in []byte) error {
|
| 286 |
+
if len(in) == 0 {
|
| 287 |
+
return lsmkv.NotFound
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
s.reusableBORW.ResetBuffer(in)
|
| 291 |
+
|
| 292 |
+
err := ParseReplaceNodeIntoMMAP(&s.reusableBORW, s.segment.secondaryIndexCount,
|
| 293 |
+
s.reusableNode)
|
| 294 |
+
if err != nil {
|
| 295 |
+
return err
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
if s.reusableNode.tombstone {
|
| 299 |
+
return lsmkv.Deleted
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
return nil
|
| 303 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set.go
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 16 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
func (s *segment) newRoaringSetCursor() *roaringset.SegmentCursor {
|
| 20 |
+
return roaringset.NewSegmentCursor(s.contents[s.dataStartPos:s.dataEndPos],
|
| 21 |
+
&roaringSetSeeker{s.index})
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
func (sg *SegmentGroup) newRoaringSetCursors() ([]roaringset.InnerCursor, func()) {
|
| 25 |
+
segments, release := sg.getAndLockSegments()
|
| 26 |
+
|
| 27 |
+
out := make([]roaringset.InnerCursor, len(segments))
|
| 28 |
+
|
| 29 |
+
for i, segment := range segments {
|
| 30 |
+
out[i] = segment.newRoaringSetCursor()
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
return out, release
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// roaringSetSeeker wraps a diskIndex so that returned node Start and End
// offsets take segmentindex.HeaderSize into account. The RoaringSet
// SegmentCursor accepts only the payload part of the underlying segment
// content, therefore offsets must be reduced by HeaderSize.
type roaringSetSeeker struct {
	diskIndex diskIndex
}
|
| 43 |
+
|
| 44 |
+
func (s *roaringSetSeeker) Seek(key []byte) (segmentindex.Node, error) {
|
| 45 |
+
node, err := s.diskIndex.Seek(key)
|
| 46 |
+
if err != nil {
|
| 47 |
+
return segmentindex.Node{}, err
|
| 48 |
+
}
|
| 49 |
+
return segmentindex.Node{
|
| 50 |
+
Key: node.Key,
|
| 51 |
+
Start: node.Start - segmentindex.HeaderSize,
|
| 52 |
+
End: node.End - segmentindex.HeaderSize,
|
| 53 |
+
}, nil
|
| 54 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set_range.go
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"io"
|
| 16 |
+
|
| 17 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 18 |
+
"github.com/weaviate/weaviate/entities/concurrency"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
func (sg *SegmentGroup) newRoaringSetRangeReaders() ([]roaringsetrange.InnerReader, func()) {
|
| 22 |
+
segments, release := sg.getAndLockSegments()
|
| 23 |
+
|
| 24 |
+
readers := make([]roaringsetrange.InnerReader, len(segments))
|
| 25 |
+
for i, segment := range segments {
|
| 26 |
+
readers[i] = segment.newRoaringSetRangeReader()
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
return readers, release
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
// newRoaringSetRangeReader builds a concurrent reader over this segment's
// roaring-set-range payload, choosing an mmap-backed or pread-backed cursor
// depending on how the segment is accessed.
func (s *segment) newRoaringSetRangeReader() *roaringsetrange.SegmentReader {
	var segmentCursor roaringsetrange.SegmentCursor
	if s.readFromMemory {
		segmentCursor = roaringsetrange.NewSegmentCursorMmap(s.contents[s.dataStartPos:s.dataEndPos])
	} else {
		// NOTE(review): io.NewSectionReader's third argument is a length, but
		// dataEndPos looks like an absolute end offset — confirm whether this
		// should be dataEndPos-dataStartPos (the cursor may bound itself).
		sectionReader := io.NewSectionReader(s.contentFile, int64(s.dataStartPos), int64(s.dataEndPos))
		// since the segment reader concurrently fetches the next segment and merges bitmaps of previous segments,
		// at least 2 buffers need to be used by the cursor so data is not overwritten before it is consumed.
		segmentCursor = roaringsetrange.NewSegmentCursorPread(sectionReader, 2)
	}

	return roaringsetrange.NewSegmentReaderConcurrent(
		roaringsetrange.NewGaplessSegmentCursor(segmentCursor),
		concurrency.SROAR_MERGE)
}
|
| 47 |
+
|
| 48 |
+
func (s *segment) newRoaringSetRangeCursor() roaringsetrange.SegmentCursor {
|
| 49 |
+
if s.readFromMemory {
|
| 50 |
+
return roaringsetrange.NewSegmentCursorMmap(s.contents[s.dataStartPos:s.dataEndPos])
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
sectionReader := io.NewSectionReader(s.contentFile, int64(s.dataStartPos), int64(s.dataEndPos))
|
| 54 |
+
// compactor does not work concurrently, next segment is fetched after previous one gets consumed,
|
| 55 |
+
// therefore just one buffer is sufficient.
|
| 56 |
+
return roaringsetrange.NewSegmentCursorPread(sectionReader, 1)
|
| 57 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/doc.go
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
/*
|
| 13 |
+
# LSMKV (= Log-structured Merge-Tree Key-Value Store)
|
| 14 |
+
|
| 15 |
+
This package contains Weaviate's custom LSM store. While modeled after the
|
| 16 |
+
usecases that are required for Weaviate to be fast, reliable, and scalable, it
|
| 17 |
+
is technically completely independent. You could build your own database on top
|
| 18 |
+
of this key-value store.
|
| 19 |
+
|
| 20 |
+
Covering the architecture of [LSM Stores] in general goes beyond the scope of
|
| 21 |
+
this documentation. Therefore things that are specific to this implementation
|
| 22 |
+
are highlighted.
|
| 23 |
+
|
| 24 |
+
# Strategies
|
| 25 |
+
|
| 26 |
+
To understand the different type of buckets in this store, you need to
|
| 27 |
+
familiarize yourself with the following strategies. A strategy defines a
|
| 28 |
+
different usecase for a [Bucket].
|
| 29 |
+
|
| 30 |
+
- "Replace"
|
| 31 |
+
|
| 32 |
+
Replace resembles the classical key-value store. Each key has exactly one
|
| 33 |
+
value. A subsequent PUT on an existing key replaces the value (hence
|
| 34 |
+
the name "replace"). Once replaced a former value can no longer be
|
| 35 |
+
retrieved, and will eventually be removed in compactions.
|
| 36 |
+
|
| 37 |
+
- "Set" (aka "SetCollection")
|
| 38 |
+
|
| 39 |
+
A set behaves like an unordered collection of independent values. In other
|
| 40 |
+
words a single key has multiple values. For example, for key "foo", you
|
| 41 |
+
could have values "bar1", "bar2", "bazzinga". A bucket of this type is
|
| 42 |
+
optimized for cheap writes to add new set additions. For example adding
|
| 43 |
+
another set element has a fixed cost independent of the number of the
|
| 44 |
+
existing set length. This makes it very well suited for building an
|
| 45 |
+
inverted index.
|
| 46 |
+
|
| 47 |
+
Retrieving a Set has a slight cost to it if a set is spread across multiple
|
| 48 |
+
segments. This cost will eventually reduce as more and more compactions
|
| 49 |
+
happen. In the ideal case (fully compacted DB), retrieving a Set requires
|
| 50 |
+
just a single disk read.
|
| 51 |
+
|
| 52 |
+
- "Map" (aka "MapCollection")
|
| 53 |
+
|
| 54 |
+
Maps are similar to Sets in the sense that for a single key there are
|
| 55 |
+
multiple values. However, each value is in itself a key-value pair. This
|
| 56 |
+
makes this type very similar to a dict or hashmap type. For example for
|
| 57 |
+
key "foo", you could have value pairs: "bar":17, "baz":19.
|
| 58 |
+
|
| 59 |
+
This makes a map a great use case for an inverted index that needs to store
|
| 60 |
+
additional info beyond just the docid-pointer, such as in the case of a
|
| 61 |
+
BM25 index where the term frequency needs to be stored.
|
| 62 |
+
|
| 63 |
+
The same performance-considerations as for sets apply.
|
| 64 |
+
|
| 65 |
+
# Navigate around these docs
|
| 66 |
+
|
| 67 |
+
Good entrypoints to learn more about how this package works include [Store]
|
| 68 |
+
with [New] and [Store.CreateOrLoadBucket], as well as [Bucket] with
|
| 69 |
+
[Bucket.Get], [Bucket.GetBySecondary], [Bucket.Put], etc.
|
| 70 |
+
|
| 71 |
+
Each strategy also supports cursor types: [CursorReplace] can be created using [Bucket.Cursor], [CursorSet] can be created with [Bucket.SetCursor] , and [CursorMap] can be created with [Bucket.MapCursor].
|
| 72 |
+
|
| 73 |
+
[LSM Stores]: https://en.wikipedia.org/wiki/Log-structured_merge-tree
|
| 74 |
+
*/
|
| 75 |
+
package lsmkv
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/doc.go
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
// Package entities contains common types used throughout various lsmkv (sub-)packages.
|
| 13 |
+
package entities
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/strategies.go
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package entities
|
| 13 |
+
|
| 14 |
+
const (
	// StrategyReplace allows for idempotent PUT where the latest write takes precedence
	StrategyReplace       = "replace"
	StrategySetCollection = "setcollection"
	StrategyMapCollection = "mapcollection"
	StrategyRoaringSet    = "roaringset"
	StrategyInverted      = "inverted"
)

// SegmentStrategy is the compact numeric form of a bucket strategy as used
// in segment metadata.
type SegmentStrategy uint16

const (
	SegmentStrategyReplace SegmentStrategy = iota
	SegmentStrategySetCollection
	SegmentStrategyMapCollection
	SegmentStrategyRoaringSet
	SegmentStrategyInverted
)

// SegmentStrategyFromString converts a strategy name into its numeric
// SegmentStrategy counterpart. It panics when handed an unknown name.
func SegmentStrategyFromString(in string) SegmentStrategy {
	strategies := map[string]SegmentStrategy{
		StrategyReplace:       SegmentStrategyReplace,
		StrategySetCollection: SegmentStrategySetCollection,
		StrategyMapCollection: SegmentStrategyMapCollection,
		StrategyRoaringSet:    SegmentStrategyRoaringSet,
		StrategyInverted:      SegmentStrategyInverted,
	}

	strategy, ok := strategies[in]
	if !ok {
		panic("unsupported strategy")
	}
	return strategy
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/fake.wal
ADDED
|
File without changes
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/global_bucket_registry.go
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"errors"
|
| 16 |
+
"fmt"
|
| 17 |
+
"sync"
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
// globalBucketRegistry tracks which bucket paths are currently in use so the
// same bucket directory cannot be opened twice within this process.
type globalBucketRegistry struct {
	buckets map[string]struct{} // set of registered absolute bucket paths
	mu      sync.Mutex          // guards buckets
}

// newGlobalBucketRegistry returns an empty, ready-to-use registry.
func newGlobalBucketRegistry() *globalBucketRegistry {
	return &globalBucketRegistry{
		buckets: make(map[string]struct{}),
	}
}

// GlobalBucketRegistry is the process-wide registry instance. It is
// initialized directly at the package level rather than via init(), which
// keeps initialization explicit and side-effect-free.
var GlobalBucketRegistry = newGlobalBucketRegistry()

// ErrBucketAlreadyRegistered is wrapped into the error returned by TryAdd
// for duplicate paths; detect it with errors.Is.
var ErrBucketAlreadyRegistered = errors.New("bucket already registered")

// TryAdd registers the given absolute bucket path. It returns an error
// wrapping ErrBucketAlreadyRegistered if the path is already registered.
func (r *globalBucketRegistry) TryAdd(absoluteBucketPath string) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	if _, ok := r.buckets[absoluteBucketPath]; ok {
		return fmt.Errorf("bucket %q: %w", absoluteBucketPath, ErrBucketAlreadyRegistered)
	}

	r.buckets[absoluteBucketPath] = struct{}{}
	return nil
}

// Remove unregisters the given path; removing an unknown path is a no-op.
func (r *globalBucketRegistry) Remove(absoluteBucketPath string) {
	r.mu.Lock()
	defer r.mu.Unlock()

	delete(r.buckets, absoluteBucketPath)
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/helper_for_test.go
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
//go:build integrationTest
|
| 13 |
+
// +build integrationTest
|
| 14 |
+
|
| 15 |
+
package lsmkv
|
| 16 |
+
|
| 17 |
+
import (
|
| 18 |
+
"math/rand"
|
| 19 |
+
"time"
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
func getRandomSeed() *rand.Rand {
|
| 23 |
+
return rand.New(rand.NewSource(time.Now().UnixNano()))
|
| 24 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/lazy_segment.go
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"fmt"
|
| 16 |
+
"regexp"
|
| 17 |
+
"strconv"
|
| 18 |
+
"sync"
|
| 19 |
+
|
| 20 |
+
"github.com/weaviate/sroar"
|
| 21 |
+
|
| 22 |
+
"github.com/sirupsen/logrus"
|
| 23 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 24 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 25 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
// lazySegment defers opening an on-disk segment until it is first accessed.
// All accessor methods load the underlying segment on demand via
// load/mustLoad.
type lazySegment struct {
	path        string
	logger      logrus.FieldLogger
	metrics     *Metrics
	existsLower existsOnLowerSegmentsFn
	cfg         segmentConfig

	// segment stays nil until load() succeeds; mux guards segment
	segment *segment
	mux     sync.Mutex
}
|
| 38 |
+
|
| 39 |
+
func newLazySegment(path string, logger logrus.FieldLogger, metrics *Metrics,
|
| 40 |
+
existsLower existsOnLowerSegmentsFn, cfg segmentConfig,
|
| 41 |
+
) (*lazySegment, error) {
|
| 42 |
+
if metrics != nil && metrics.LazySegmentInit != nil {
|
| 43 |
+
metrics.LazySegmentInit.Inc()
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
return &lazySegment{
|
| 47 |
+
path: path,
|
| 48 |
+
logger: logger,
|
| 49 |
+
metrics: metrics,
|
| 50 |
+
existsLower: existsLower,
|
| 51 |
+
cfg: cfg,
|
| 52 |
+
}, nil
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
func (s *lazySegment) load() error {
|
| 56 |
+
s.mux.Lock()
|
| 57 |
+
defer s.mux.Unlock()
|
| 58 |
+
|
| 59 |
+
if s.segment == nil {
|
| 60 |
+
segment, err := newSegment(s.path, s.logger, s.metrics, s.existsLower, s.cfg)
|
| 61 |
+
if err != nil {
|
| 62 |
+
return err
|
| 63 |
+
}
|
| 64 |
+
s.segment = segment
|
| 65 |
+
if s.metrics != nil && s.metrics.LazySegmentLoad != nil {
|
| 66 |
+
s.metrics.LazySegmentLoad.Inc()
|
| 67 |
+
}
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
return nil
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
func (s *lazySegment) mustLoad() {
|
| 74 |
+
err := s.load()
|
| 75 |
+
if err != nil {
|
| 76 |
+
panic(fmt.Errorf("error loading segment %q: %w", s.path, err))
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// getPath returns the stored path without forcing a segment load.
func (s *lazySegment) getPath() string {
	return s.path
}

// setPath updates the path on the loaded segment (loading it first).
// NOTE(review): s.path itself is not updated, so getPath may keep returning
// the old path — confirm this is intended.
func (s *lazySegment) setPath(path string) {
	s.mustLoad()
	s.segment.setPath(path)
}

// getStrategy returns the segment strategy, decoded from the filename
// (".s<NUM>." marker) when possible to avoid loading the segment.
func (s *lazySegment) getStrategy() segmentindex.Strategy {
	strategy, found := s.numberFromPath("s")
	if found {
		return segmentindex.Strategy(strategy)
	}
	s.mustLoad()
	return s.segment.getStrategy()
}

// getSecondaryIndexCount requires the loaded segment.
func (s *lazySegment) getSecondaryIndexCount() uint16 {
	s.mustLoad()
	return s.segment.getSecondaryIndexCount()
}

// getLevel returns the compaction level, decoded from the filename
// (".l<NUM>." marker) when possible to avoid loading the segment.
func (s *lazySegment) getLevel() uint16 {
	level, found := s.numberFromPath("l")
	if found {
		return uint16(level)
	}

	s.mustLoad()
	return s.segment.getLevel()
}
|
| 112 |
+
|
| 113 |
+
// The size accessors below all require the loaded segment and delegate to it.

func (s *lazySegment) getSize() int64 {
	s.mustLoad()
	return s.segment.getSize()
}

func (s *lazySegment) setSize(size int64) {
	s.mustLoad()
	s.segment.setSize(size)
}

func (s *lazySegment) PayloadSize() int {
	s.mustLoad()
	return s.segment.PayloadSize()
}

func (s *lazySegment) Size() int {
	s.mustLoad()
	return s.segment.Size()
}
|
| 132 |
+
|
| 133 |
+
// close releases the underlying segment if it was ever loaded; otherwise it
// is a no-op apart from metrics.
func (s *lazySegment) close() error {
	s.mux.Lock()
	defer s.mux.Unlock()

	// the close metric is incremented even when nothing was loaded
	if s.metrics != nil && s.metrics.LazySegmentClose != nil {
		s.metrics.LazySegmentClose.Inc()
	}
	if s.segment == nil {
		return nil
	}
	// the unload metric only fires when a loaded segment is actually closed
	if s.metrics != nil && s.metrics.LazySegmentUnLoad != nil {
		s.metrics.LazySegmentUnLoad.Inc()
	}
	return s.segment.close()
}
|
| 148 |
+
|
| 149 |
+
// The read accessors below load the segment on demand and delegate to it.

func (s *lazySegment) get(key []byte) ([]byte, error) {
	s.mustLoad()
	return s.segment.get(key)
}

func (s *lazySegment) getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error) {
	s.mustLoad()
	return s.segment.getBySecondaryIntoMemory(pos, key, buffer)
}

func (s *lazySegment) getCollection(key []byte) ([]value, error) {
	s.mustLoad()
	return s.segment.getCollection(key)
}

func (s *lazySegment) getInvertedData() *segmentInvertedData {
	s.mustLoad()
	return s.segment.getInvertedData()
}

// getSegment exposes the (now loaded) underlying segment directly.
func (s *lazySegment) getSegment() *segment {
	s.mustLoad()
	return s.segment
}
|
| 173 |
+
|
| 174 |
+
// isLoaded reports whether the underlying segment has been opened, without
// triggering a load.
func (s *lazySegment) isLoaded() bool {
	s.mux.Lock()
	defer s.mux.Unlock()

	return s.segment != nil
}

// markForDeletion loads the segment and flags it for deletion.
func (s *lazySegment) markForDeletion() error {
	s.mustLoad()
	return s.segment.markForDeletion()
}

// MergeTombstones loads the segment and delegates the tombstone merge.
func (s *lazySegment) MergeTombstones(other *sroar.Bitmap) (*sroar.Bitmap, error) {
	s.mustLoad()
	return s.segment.MergeTombstones(other)
}
|
| 190 |
+
|
| 191 |
+
// The factory and query methods below all load the underlying segment on
// first use and then delegate to it unchanged.

func (s *lazySegment) newCollectionCursor() *segmentCursorCollection {
	s.mustLoad()
	return s.segment.newCollectionCursor()
}

func (s *lazySegment) newCollectionCursorReusable() *segmentCursorCollectionReusable {
	s.mustLoad()
	return s.segment.newCollectionCursorReusable()
}

func (s *lazySegment) newCursor() *segmentCursorReplace {
	s.mustLoad()
	return s.segment.newCursor()
}

func (s *lazySegment) newCursorWithSecondaryIndex(pos int) *segmentCursorReplace {
	s.mustLoad()
	return s.segment.newCursorWithSecondaryIndex(pos)
}

func (s *lazySegment) newMapCursor() *segmentCursorMap {
	s.mustLoad()
	return s.segment.newMapCursor()
}

func (s *lazySegment) newNodeReader(offset nodeOffset, operation string) (*nodeReader, error) {
	s.mustLoad()
	return s.segment.newNodeReader(offset, operation)
}

func (s *lazySegment) newRoaringSetCursor() *roaringset.SegmentCursor {
	s.mustLoad()
	return s.segment.newRoaringSetCursor()
}

func (s *lazySegment) newRoaringSetRangeCursor() roaringsetrange.SegmentCursor {
	s.mustLoad()
	return s.segment.newRoaringSetRangeCursor()
}

func (s *lazySegment) newRoaringSetRangeReader() *roaringsetrange.SegmentReader {
	s.mustLoad()
	return s.segment.newRoaringSetRangeReader()
}

func (s *lazySegment) quantileKeys(q int) [][]byte {
	s.mustLoad()
	return s.segment.quantileKeys(q)
}

func (s *lazySegment) ReadOnlyTombstones() (*sroar.Bitmap, error) {
	s.mustLoad()
	return s.segment.ReadOnlyTombstones()
}

func (s *lazySegment) replaceStratParseData(in []byte) ([]byte, []byte, error) {
	s.mustLoad()
	return s.segment.replaceStratParseData(in)
}

func (s *lazySegment) roaringSetGet(key []byte, bitmapBufPool roaringset.BitmapBufPool,
) (roaringset.BitmapLayer, func(), error) {
	s.mustLoad()
	return s.segment.roaringSetGet(key, bitmapBufPool)
}

func (s *lazySegment) roaringSetMergeWith(key []byte, input roaringset.BitmapLayer, bitmapBufPool roaringset.BitmapBufPool,
) error {
	s.mustLoad()
	return s.segment.roaringSetMergeWith(key, input, bitmapBufPool)
}
|
| 262 |
+
|
| 263 |
+
func (s *lazySegment) numberFromPath(str string) (int, bool) {
|
| 264 |
+
template := fmt.Sprintf(`\.%s(\d+)\.`, str)
|
| 265 |
+
re := regexp.MustCompile(template)
|
| 266 |
+
match := re.FindStringSubmatch(s.path)
|
| 267 |
+
if len(match) > 1 {
|
| 268 |
+
num, err := strconv.Atoi(match[1])
|
| 269 |
+
if err == nil {
|
| 270 |
+
return num, true
|
| 271 |
+
}
|
| 272 |
+
}
|
| 273 |
+
return 0, false
|
| 274 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable.go
ADDED
|
@@ -0,0 +1,507 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
"fmt"
|
| 17 |
+
"math"
|
| 18 |
+
"path/filepath"
|
| 19 |
+
"sync"
|
| 20 |
+
"time"
|
| 21 |
+
|
| 22 |
+
"github.com/weaviate/weaviate/usecases/memwatch"
|
| 23 |
+
|
| 24 |
+
"github.com/pkg/errors"
|
| 25 |
+
"github.com/sirupsen/logrus"
|
| 26 |
+
"github.com/weaviate/sroar"
|
| 27 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 28 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 29 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 30 |
+
"github.com/weaviate/weaviate/entities/models"
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
type Memtable struct {
|
| 34 |
+
sync.RWMutex
|
| 35 |
+
key *binarySearchTree
|
| 36 |
+
keyMulti *binarySearchTreeMulti
|
| 37 |
+
keyMap *binarySearchTreeMap
|
| 38 |
+
primaryIndex *binarySearchTree
|
| 39 |
+
roaringSet *roaringset.BinarySearchTree
|
| 40 |
+
roaringSetRange *roaringsetrange.Memtable
|
| 41 |
+
commitlog memtableCommitLogger
|
| 42 |
+
allocChecker memwatch.AllocChecker
|
| 43 |
+
size uint64
|
| 44 |
+
path string
|
| 45 |
+
strategy string
|
| 46 |
+
secondaryIndices uint16
|
| 47 |
+
secondaryToPrimary []map[string][]byte
|
| 48 |
+
// stores time memtable got dirty to determine when flush is needed
|
| 49 |
+
dirtyAt time.Time
|
| 50 |
+
createdAt time.Time
|
| 51 |
+
metrics *memtableMetrics
|
| 52 |
+
writesSinceLastSync bool
|
| 53 |
+
|
| 54 |
+
tombstones *sroar.Bitmap
|
| 55 |
+
|
| 56 |
+
enableChecksumValidation bool
|
| 57 |
+
|
| 58 |
+
bm25config *models.BM25Config
|
| 59 |
+
averagePropLength float64
|
| 60 |
+
propLengthCount uint64
|
| 61 |
+
writeSegmentInfoIntoFileName bool
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
func newMemtable(path string, strategy string, secondaryIndices uint16,
|
| 65 |
+
cl memtableCommitLogger, metrics *Metrics, logger logrus.FieldLogger,
|
| 66 |
+
enableChecksumValidation bool, bm25config *models.BM25Config, writeSegmentInfoIntoFileName bool, allocChecker memwatch.AllocChecker,
|
| 67 |
+
) (*Memtable, error) {
|
| 68 |
+
m := &Memtable{
|
| 69 |
+
key: &binarySearchTree{},
|
| 70 |
+
keyMulti: &binarySearchTreeMulti{},
|
| 71 |
+
keyMap: &binarySearchTreeMap{},
|
| 72 |
+
primaryIndex: &binarySearchTree{}, // todo, sort upfront
|
| 73 |
+
roaringSet: &roaringset.BinarySearchTree{},
|
| 74 |
+
roaringSetRange: roaringsetrange.NewMemtable(logger),
|
| 75 |
+
commitlog: cl,
|
| 76 |
+
path: path,
|
| 77 |
+
strategy: strategy,
|
| 78 |
+
secondaryIndices: secondaryIndices,
|
| 79 |
+
dirtyAt: time.Time{},
|
| 80 |
+
createdAt: time.Now(),
|
| 81 |
+
metrics: newMemtableMetrics(metrics, filepath.Dir(path), strategy),
|
| 82 |
+
enableChecksumValidation: enableChecksumValidation,
|
| 83 |
+
bm25config: bm25config,
|
| 84 |
+
writeSegmentInfoIntoFileName: writeSegmentInfoIntoFileName,
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
if m.secondaryIndices > 0 {
|
| 88 |
+
m.secondaryToPrimary = make([]map[string][]byte, m.secondaryIndices)
|
| 89 |
+
for i := range m.secondaryToPrimary {
|
| 90 |
+
m.secondaryToPrimary[i] = map[string][]byte{}
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
m.metrics.size(m.size)
|
| 95 |
+
|
| 96 |
+
if m.strategy == StrategyInverted {
|
| 97 |
+
m.tombstones = sroar.NewBitmap()
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
return m, nil
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
func (m *Memtable) get(key []byte) ([]byte, error) {
|
| 104 |
+
start := time.Now()
|
| 105 |
+
defer m.metrics.get(start.UnixNano())
|
| 106 |
+
|
| 107 |
+
if m.strategy != StrategyReplace {
|
| 108 |
+
return nil, errors.Errorf("get only possible with strategy 'replace'")
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
m.RLock()
|
| 112 |
+
defer m.RUnlock()
|
| 113 |
+
|
| 114 |
+
return m.key.get(key)
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
func (m *Memtable) getBySecondary(pos int, key []byte) ([]byte, error) {
|
| 118 |
+
start := time.Now()
|
| 119 |
+
defer m.metrics.getBySecondary(start.UnixNano())
|
| 120 |
+
|
| 121 |
+
if m.strategy != StrategyReplace {
|
| 122 |
+
return nil, errors.Errorf("get only possible with strategy 'replace'")
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
m.RLock()
|
| 126 |
+
defer m.RUnlock()
|
| 127 |
+
|
| 128 |
+
primary := m.secondaryToPrimary[pos][string(key)]
|
| 129 |
+
if primary == nil {
|
| 130 |
+
return nil, lsmkv.NotFound
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
return m.key.get(primary)
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
func (m *Memtable) put(key, value []byte, opts ...SecondaryKeyOption) error {
|
| 137 |
+
start := time.Now()
|
| 138 |
+
defer m.metrics.put(start.UnixNano())
|
| 139 |
+
|
| 140 |
+
if m.strategy != StrategyReplace {
|
| 141 |
+
return errors.Errorf("put only possible with strategy 'replace'")
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
m.Lock()
|
| 145 |
+
defer m.Unlock()
|
| 146 |
+
m.writesSinceLastSync = true
|
| 147 |
+
|
| 148 |
+
var secondaryKeys [][]byte
|
| 149 |
+
if m.secondaryIndices > 0 {
|
| 150 |
+
secondaryKeys = make([][]byte, m.secondaryIndices)
|
| 151 |
+
for _, opt := range opts {
|
| 152 |
+
if err := opt(secondaryKeys); err != nil {
|
| 153 |
+
return err
|
| 154 |
+
}
|
| 155 |
+
}
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
if err := m.commitlog.put(segmentReplaceNode{
|
| 159 |
+
primaryKey: key,
|
| 160 |
+
value: value,
|
| 161 |
+
secondaryIndexCount: m.secondaryIndices,
|
| 162 |
+
secondaryKeys: secondaryKeys,
|
| 163 |
+
tombstone: false,
|
| 164 |
+
}); err != nil {
|
| 165 |
+
return errors.Wrap(err, "write into commit log")
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
netAdditions, previousKeys := m.key.insert(key, value, secondaryKeys)
|
| 169 |
+
|
| 170 |
+
for i, sec := range previousKeys {
|
| 171 |
+
m.secondaryToPrimary[i][string(sec)] = nil
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
for i, sec := range secondaryKeys {
|
| 175 |
+
m.secondaryToPrimary[i][string(sec)] = key
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
m.size += uint64(netAdditions)
|
| 179 |
+
m.metrics.size(m.size)
|
| 180 |
+
m.updateDirtyAt()
|
| 181 |
+
|
| 182 |
+
return nil
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
func (m *Memtable) setTombstone(key []byte, opts ...SecondaryKeyOption) error {
|
| 186 |
+
start := time.Now()
|
| 187 |
+
defer m.metrics.setTombstone(start.UnixNano())
|
| 188 |
+
|
| 189 |
+
if m.strategy != "replace" {
|
| 190 |
+
return errors.Errorf("setTombstone only possible with strategy 'replace'")
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
m.Lock()
|
| 194 |
+
defer m.Unlock()
|
| 195 |
+
m.writesSinceLastSync = true
|
| 196 |
+
|
| 197 |
+
var secondaryKeys [][]byte
|
| 198 |
+
if m.secondaryIndices > 0 {
|
| 199 |
+
secondaryKeys = make([][]byte, m.secondaryIndices)
|
| 200 |
+
for _, opt := range opts {
|
| 201 |
+
if err := opt(secondaryKeys); err != nil {
|
| 202 |
+
return err
|
| 203 |
+
}
|
| 204 |
+
}
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
if err := m.commitlog.put(segmentReplaceNode{
|
| 208 |
+
primaryKey: key,
|
| 209 |
+
value: nil,
|
| 210 |
+
secondaryIndexCount: m.secondaryIndices,
|
| 211 |
+
secondaryKeys: secondaryKeys,
|
| 212 |
+
tombstone: true,
|
| 213 |
+
}); err != nil {
|
| 214 |
+
return errors.Wrap(err, "write into commit log")
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
m.key.setTombstone(key, nil, secondaryKeys)
|
| 218 |
+
m.size += uint64(len(key)) + 1 // 1 byte for tombstone
|
| 219 |
+
m.metrics.size(m.size)
|
| 220 |
+
m.updateDirtyAt()
|
| 221 |
+
|
| 222 |
+
return nil
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
func (m *Memtable) setTombstoneWith(key []byte, deletionTime time.Time, opts ...SecondaryKeyOption) error {
|
| 226 |
+
start := time.Now()
|
| 227 |
+
defer m.metrics.setTombstone(start.UnixNano())
|
| 228 |
+
|
| 229 |
+
if m.strategy != "replace" {
|
| 230 |
+
return errors.Errorf("setTombstone only possible with strategy 'replace'")
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
m.Lock()
|
| 234 |
+
defer m.Unlock()
|
| 235 |
+
m.writesSinceLastSync = true
|
| 236 |
+
|
| 237 |
+
var secondaryKeys [][]byte
|
| 238 |
+
if m.secondaryIndices > 0 {
|
| 239 |
+
secondaryKeys = make([][]byte, m.secondaryIndices)
|
| 240 |
+
for _, opt := range opts {
|
| 241 |
+
if err := opt(secondaryKeys); err != nil {
|
| 242 |
+
return err
|
| 243 |
+
}
|
| 244 |
+
}
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
tombstonedVal := tombstonedValue(deletionTime)
|
| 248 |
+
|
| 249 |
+
if err := m.commitlog.put(segmentReplaceNode{
|
| 250 |
+
primaryKey: key,
|
| 251 |
+
value: tombstonedVal[:],
|
| 252 |
+
secondaryIndexCount: m.secondaryIndices,
|
| 253 |
+
secondaryKeys: secondaryKeys,
|
| 254 |
+
tombstone: true,
|
| 255 |
+
}); err != nil {
|
| 256 |
+
return errors.Wrap(err, "write into commit log")
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
m.key.setTombstone(key, tombstonedVal[:], secondaryKeys)
|
| 260 |
+
m.size += uint64(len(key)) + 1 // 1 byte for tombstone
|
| 261 |
+
m.metrics.size(m.size)
|
| 262 |
+
m.updateDirtyAt()
|
| 263 |
+
|
| 264 |
+
return nil
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
func tombstonedValue(deletionTime time.Time) []byte {
|
| 268 |
+
var tombstonedVal [1 + 8]byte // version=1 deletionTime
|
| 269 |
+
tombstonedVal[0] = 1
|
| 270 |
+
binary.LittleEndian.PutUint64(tombstonedVal[1:], uint64(deletionTime.UnixMilli()))
|
| 271 |
+
return tombstonedVal[:]
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
func errorFromTombstonedValue(tombstonedVal []byte) error {
|
| 275 |
+
if len(tombstonedVal) == 0 {
|
| 276 |
+
return lsmkv.Deleted
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
if tombstonedVal[0] != 1 {
|
| 280 |
+
return fmt.Errorf("unexpected tomstoned value, unsupported version %d", tombstonedVal[0])
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
if len(tombstonedVal) != 9 {
|
| 284 |
+
return fmt.Errorf("unexpected tomstoned value, invalid length")
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
deletionTimeUnixMilli := int64(binary.LittleEndian.Uint64(tombstonedVal[1:]))
|
| 288 |
+
|
| 289 |
+
return lsmkv.NewErrDeleted(time.UnixMilli(deletionTimeUnixMilli))
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
func (m *Memtable) getCollection(key []byte) ([]value, error) {
|
| 293 |
+
start := time.Now()
|
| 294 |
+
defer m.metrics.getCollection(start.UnixNano())
|
| 295 |
+
|
| 296 |
+
// TODO amourao: check if this is needed for StrategyInverted
|
| 297 |
+
if m.strategy != StrategySetCollection && m.strategy != StrategyMapCollection && m.strategy != StrategyInverted {
|
| 298 |
+
return nil, errors.Errorf("getCollection only possible with strategies %q, %q, %q",
|
| 299 |
+
StrategySetCollection, StrategyMapCollection, StrategyInverted)
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
m.RLock()
|
| 303 |
+
defer m.RUnlock()
|
| 304 |
+
|
| 305 |
+
v, err := m.keyMulti.get(key)
|
| 306 |
+
if err != nil {
|
| 307 |
+
return nil, err
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
return v, nil
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
func (m *Memtable) getMap(key []byte) ([]MapPair, error) {
|
| 314 |
+
start := time.Now()
|
| 315 |
+
defer m.metrics.getMap(start.UnixNano())
|
| 316 |
+
|
| 317 |
+
if m.strategy != StrategyMapCollection && m.strategy != StrategyInverted {
|
| 318 |
+
return nil, errors.Errorf("getMap only possible with strategies %q, %q",
|
| 319 |
+
StrategyMapCollection, StrategyInverted)
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
m.RLock()
|
| 323 |
+
defer m.RUnlock()
|
| 324 |
+
|
| 325 |
+
v, err := m.keyMap.get(key)
|
| 326 |
+
if err != nil {
|
| 327 |
+
return nil, err
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
return v, nil
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
func (m *Memtable) append(key []byte, values []value) error {
|
| 334 |
+
start := time.Now()
|
| 335 |
+
defer m.metrics.append(start.UnixNano())
|
| 336 |
+
|
| 337 |
+
if m.strategy != StrategySetCollection && m.strategy != StrategyMapCollection {
|
| 338 |
+
return errors.Errorf("append only possible with strategies %q, %q",
|
| 339 |
+
StrategySetCollection, StrategyMapCollection)
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
m.Lock()
|
| 343 |
+
defer m.Unlock()
|
| 344 |
+
m.writesSinceLastSync = true
|
| 345 |
+
|
| 346 |
+
if err := m.commitlog.append(segmentCollectionNode{
|
| 347 |
+
primaryKey: key,
|
| 348 |
+
values: values,
|
| 349 |
+
}); err != nil {
|
| 350 |
+
return errors.Wrap(err, "write into commit log")
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
m.keyMulti.insert(key, values)
|
| 354 |
+
m.size += uint64(len(key))
|
| 355 |
+
for _, value := range values {
|
| 356 |
+
m.size += uint64(len(value.value))
|
| 357 |
+
}
|
| 358 |
+
m.metrics.size(m.size)
|
| 359 |
+
m.updateDirtyAt()
|
| 360 |
+
|
| 361 |
+
return nil
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
func (m *Memtable) appendMapSorted(key []byte, pair MapPair) error {
|
| 365 |
+
start := time.Now()
|
| 366 |
+
defer m.metrics.appendMapSorted(start.UnixNano())
|
| 367 |
+
|
| 368 |
+
if m.strategy != StrategyMapCollection && m.strategy != StrategyInverted {
|
| 369 |
+
return errors.Errorf("append only possible with strategy %q, %q",
|
| 370 |
+
StrategyMapCollection, StrategyInverted)
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
valuesForCommitLog, err := pair.Bytes()
|
| 374 |
+
if err != nil {
|
| 375 |
+
return err
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
newNode := segmentCollectionNode{
|
| 379 |
+
primaryKey: key,
|
| 380 |
+
values: []value{
|
| 381 |
+
{
|
| 382 |
+
value: valuesForCommitLog,
|
| 383 |
+
tombstone: pair.Tombstone,
|
| 384 |
+
},
|
| 385 |
+
},
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
m.Lock()
|
| 389 |
+
defer m.Unlock()
|
| 390 |
+
m.writesSinceLastSync = true
|
| 391 |
+
|
| 392 |
+
if err := m.commitlog.append(newNode); err != nil {
|
| 393 |
+
return errors.Wrap(err, "write into commit log")
|
| 394 |
+
}
|
| 395 |
+
|
| 396 |
+
m.keyMap.insert(key, pair)
|
| 397 |
+
m.size += uint64(len(key) + len(valuesForCommitLog))
|
| 398 |
+
m.metrics.size(m.size)
|
| 399 |
+
m.updateDirtyAt()
|
| 400 |
+
|
| 401 |
+
return nil
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
func (m *Memtable) Size() uint64 {
|
| 405 |
+
m.RLock()
|
| 406 |
+
defer m.RUnlock()
|
| 407 |
+
|
| 408 |
+
return m.size
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
func (m *Memtable) ActiveDuration() time.Duration {
|
| 412 |
+
m.RLock()
|
| 413 |
+
defer m.RUnlock()
|
| 414 |
+
|
| 415 |
+
return time.Since(m.createdAt)
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
func (m *Memtable) updateDirtyAt() {
|
| 419 |
+
if m.dirtyAt.IsZero() {
|
| 420 |
+
m.dirtyAt = time.Now()
|
| 421 |
+
}
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
// returns time memtable got dirty (1st write occurred)
|
| 425 |
+
// (0 if clean)
|
| 426 |
+
func (m *Memtable) DirtyDuration() time.Duration {
|
| 427 |
+
m.RLock()
|
| 428 |
+
defer m.RUnlock()
|
| 429 |
+
|
| 430 |
+
if m.dirtyAt.IsZero() {
|
| 431 |
+
return 0
|
| 432 |
+
}
|
| 433 |
+
return time.Since(m.dirtyAt)
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
func (m *Memtable) countStats() *countStats {
|
| 437 |
+
m.RLock()
|
| 438 |
+
defer m.RUnlock()
|
| 439 |
+
return m.key.countStats()
|
| 440 |
+
}
|
| 441 |
+
|
| 442 |
+
// the WAL uses a buffer and isn't written until the buffer size is crossed or
|
| 443 |
+
// this function explicitly called. This allows to safge unnecessary disk
|
| 444 |
+
// writes in larger operations, such as batches. It is sufficient to call write
|
| 445 |
+
// on the WAL just once. This does not make a batch atomic, but it guarantees
|
| 446 |
+
// that the WAL is written before a successful response is returned to the
|
| 447 |
+
// user.
|
| 448 |
+
func (m *Memtable) writeWAL() error {
|
| 449 |
+
m.Lock()
|
| 450 |
+
defer m.Unlock()
|
| 451 |
+
|
| 452 |
+
return m.commitlog.flushBuffers()
|
| 453 |
+
}
|
| 454 |
+
|
| 455 |
+
func (m *Memtable) ReadOnlyTombstones() (*sroar.Bitmap, error) {
|
| 456 |
+
if m.strategy != StrategyInverted {
|
| 457 |
+
return nil, errors.Errorf("tombstones only supported for strategy %q", StrategyInverted)
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
m.RLock()
|
| 461 |
+
defer m.RUnlock()
|
| 462 |
+
|
| 463 |
+
if m.tombstones != nil {
|
| 464 |
+
return m.tombstones.Clone(), nil
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
return nil, lsmkv.NotFound
|
| 468 |
+
}
|
| 469 |
+
|
| 470 |
+
func (m *Memtable) SetTombstone(docId uint64) error {
|
| 471 |
+
if m.strategy != StrategyInverted {
|
| 472 |
+
return errors.Errorf("tombstones only supported for strategy %q", StrategyInverted)
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
m.Lock()
|
| 476 |
+
defer m.Unlock()
|
| 477 |
+
|
| 478 |
+
m.tombstones.Set(docId)
|
| 479 |
+
|
| 480 |
+
return nil
|
| 481 |
+
}
|
| 482 |
+
|
| 483 |
+
func (m *Memtable) GetPropLengths() (uint64, uint64, error) {
|
| 484 |
+
m.RLock()
|
| 485 |
+
flatA := m.keyMap.flattenInOrder()
|
| 486 |
+
m.RUnlock()
|
| 487 |
+
|
| 488 |
+
docIdsLengths := make(map[uint64]uint32)
|
| 489 |
+
propLengthSum := uint64(0)
|
| 490 |
+
propLengthCount := uint64(0)
|
| 491 |
+
|
| 492 |
+
for _, mapNode := range flatA {
|
| 493 |
+
for j := range mapNode.values {
|
| 494 |
+
docId := binary.BigEndian.Uint64(mapNode.values[j].Key)
|
| 495 |
+
if !mapNode.values[j].Tombstone {
|
| 496 |
+
fieldLength := math.Float32frombits(binary.LittleEndian.Uint32(mapNode.values[j].Value[4:]))
|
| 497 |
+
if _, ok := docIdsLengths[docId]; !ok {
|
| 498 |
+
propLengthSum += uint64(fieldLength)
|
| 499 |
+
propLengthCount++
|
| 500 |
+
}
|
| 501 |
+
docIdsLengths[docId] = uint32(fieldLength)
|
| 502 |
+
}
|
| 503 |
+
}
|
| 504 |
+
}
|
| 505 |
+
|
| 506 |
+
return propLengthSum, propLengthCount, nil
|
| 507 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush.go
ADDED
|
@@ -0,0 +1,333 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"bufio"
|
| 16 |
+
"fmt"
|
| 17 |
+
"os"
|
| 18 |
+
"path/filepath"
|
| 19 |
+
"strings"
|
| 20 |
+
|
| 21 |
+
"github.com/prometheus/client_golang/prometheus"
|
| 22 |
+
"github.com/weaviate/weaviate/usecases/monitoring"
|
| 23 |
+
|
| 24 |
+
"github.com/pkg/errors"
|
| 25 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 26 |
+
"github.com/weaviate/weaviate/entities/diskio"
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
func (m *Memtable) flushWAL() error {
|
| 30 |
+
if err := m.commitlog.close(); err != nil {
|
| 31 |
+
return err
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
if m.Size() == 0 {
|
| 35 |
+
// this is an empty memtable, nothing to do
|
| 36 |
+
// however, we still have to cleanup the commit log, otherwise we will
|
| 37 |
+
// attempt to recover from it on the next cycle
|
| 38 |
+
if err := m.commitlog.delete(); err != nil {
|
| 39 |
+
return errors.Wrap(err, "delete commit log file")
|
| 40 |
+
}
|
| 41 |
+
return nil
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
// fsync parent directory
|
| 45 |
+
err := diskio.Fsync(filepath.Dir(m.path))
|
| 46 |
+
if err != nil {
|
| 47 |
+
return err
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
return nil
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
func (m *Memtable) flush() (segmentPath string, rerr error) {
|
| 54 |
+
// close the commit log first, this also forces it to be fsynced. If
|
| 55 |
+
// something fails there, don't proceed with flushing. The commit log will
|
| 56 |
+
// only be deleted at the very end, if the flush was successful
|
| 57 |
+
// (indicated by a successful close of the flush file - which indicates a
|
| 58 |
+
// successful fsync)
|
| 59 |
+
|
| 60 |
+
if err := m.commitlog.close(); err != nil {
|
| 61 |
+
return "", errors.Wrap(err, "close commit log file")
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
if m.Size() == 0 {
|
| 65 |
+
// this is an empty memtable, nothing to do
|
| 66 |
+
// however, we still have to cleanup the commit log, otherwise we will
|
| 67 |
+
// attempt to recover from it on the next cycle
|
| 68 |
+
if err := m.commitlog.delete(); err != nil {
|
| 69 |
+
return "", errors.Wrap(err, "delete commit log file")
|
| 70 |
+
}
|
| 71 |
+
return "", nil
|
| 72 |
+
}
|
| 73 |
+
var tmpSegmentPath string
|
| 74 |
+
if m.writeSegmentInfoIntoFileName {
|
| 75 |
+
// new segments are always level 0
|
| 76 |
+
tmpSegmentPath = m.path + segmentExtraInfo(0, SegmentStrategyFromString(m.strategy)) + ".db.tmp"
|
| 77 |
+
} else {
|
| 78 |
+
tmpSegmentPath = m.path + ".db.tmp"
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
f, err := os.OpenFile(tmpSegmentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
|
| 82 |
+
if err != nil {
|
| 83 |
+
return "", err
|
| 84 |
+
}
|
| 85 |
+
defer func() {
|
| 86 |
+
if rerr != nil {
|
| 87 |
+
f.Close()
|
| 88 |
+
os.Remove(tmpSegmentPath)
|
| 89 |
+
}
|
| 90 |
+
}()
|
| 91 |
+
|
| 92 |
+
observeWrite := m.metrics.writeMemtable
|
| 93 |
+
cb := func(written int64) {
|
| 94 |
+
observeWrite(written)
|
| 95 |
+
}
|
| 96 |
+
meteredF := diskio.NewMeteredWriter(f, cb)
|
| 97 |
+
|
| 98 |
+
bufw := bufio.NewWriter(meteredF)
|
| 99 |
+
segmentFile := segmentindex.NewSegmentFile(
|
| 100 |
+
segmentindex.WithBufferedWriter(bufw),
|
| 101 |
+
segmentindex.WithChecksumsDisabled(!m.enableChecksumValidation),
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
var keys []segmentindex.Key
|
| 105 |
+
skipIndices := false
|
| 106 |
+
|
| 107 |
+
switch m.strategy {
|
| 108 |
+
case StrategyReplace:
|
| 109 |
+
if keys, err = m.flushDataReplace(segmentFile); err != nil {
|
| 110 |
+
return "", err
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
case StrategySetCollection:
|
| 114 |
+
if keys, err = m.flushDataSet(segmentFile); err != nil {
|
| 115 |
+
return "", err
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
case StrategyRoaringSet:
|
| 119 |
+
if keys, err = m.flushDataRoaringSet(segmentFile); err != nil {
|
| 120 |
+
return "", err
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
case StrategyRoaringSetRange:
|
| 124 |
+
if keys, err = m.flushDataRoaringSetRange(segmentFile); err != nil {
|
| 125 |
+
return "", err
|
| 126 |
+
}
|
| 127 |
+
skipIndices = true
|
| 128 |
+
|
| 129 |
+
case StrategyMapCollection:
|
| 130 |
+
if keys, err = m.flushDataMap(segmentFile); err != nil {
|
| 131 |
+
return "", err
|
| 132 |
+
}
|
| 133 |
+
case StrategyInverted:
|
| 134 |
+
if keys, _, err = m.flushDataInverted(segmentFile, meteredF, bufw); err != nil {
|
| 135 |
+
return "", err
|
| 136 |
+
}
|
| 137 |
+
skipIndices = true
|
| 138 |
+
default:
|
| 139 |
+
return "", fmt.Errorf("cannot flush strategy %s", m.strategy)
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
if !skipIndices {
|
| 143 |
+
indexes := &segmentindex.Indexes{
|
| 144 |
+
Keys: keys,
|
| 145 |
+
SecondaryIndexCount: m.secondaryIndices,
|
| 146 |
+
ScratchSpacePath: m.path + ".scratch.d",
|
| 147 |
+
ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{
|
| 148 |
+
"strategy": m.strategy,
|
| 149 |
+
"operation": "writeIndices",
|
| 150 |
+
}),
|
| 151 |
+
AllocChecker: m.allocChecker,
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
if _, err := segmentFile.WriteIndexes(indexes, int64(m.size)); err != nil {
|
| 155 |
+
return "", err
|
| 156 |
+
}
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
if _, err := segmentFile.WriteChecksum(); err != nil {
|
| 160 |
+
return "", err
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
if err := f.Sync(); err != nil {
|
| 164 |
+
return "", err
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
if err := f.Close(); err != nil {
|
| 168 |
+
return "", err
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
segmentPath = strings.TrimSuffix(tmpSegmentPath, ".tmp")
|
| 172 |
+
err = os.Rename(tmpSegmentPath, segmentPath)
|
| 173 |
+
if err != nil {
|
| 174 |
+
return "", err
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
// fsync parent directory
|
| 178 |
+
err = diskio.Fsync(filepath.Dir(m.path))
|
| 179 |
+
if err != nil {
|
| 180 |
+
return "", err
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
// only now that the file has been flushed is it safe to delete the commit log
|
| 184 |
+
// TODO: there might be an interest in keeping the commit logs around for
|
| 185 |
+
// longer as they might come in handy for replication
|
| 186 |
+
return segmentPath, m.commitlog.delete()
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
func (m *Memtable) flushDataReplace(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) {
|
| 190 |
+
flat := m.key.flattenInOrder()
|
| 191 |
+
|
| 192 |
+
totalDataLength := totalKeyAndValueSize(flat)
|
| 193 |
+
perObjectAdditions := len(flat) * (1 + 8 + 4 + int(m.secondaryIndices)*4) // 1 byte for the tombstone, 8 bytes value length encoding, 4 bytes key length encoding, + 4 bytes key encoding for every secondary index
|
| 194 |
+
headerSize := segmentindex.HeaderSize
|
| 195 |
+
header := &segmentindex.Header{
|
| 196 |
+
IndexStart: uint64(totalDataLength + perObjectAdditions + headerSize),
|
| 197 |
+
Level: 0, // always level zero on a new one
|
| 198 |
+
Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation),
|
| 199 |
+
SecondaryIndices: m.secondaryIndices,
|
| 200 |
+
Strategy: SegmentStrategyFromString(m.strategy),
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
n, err := f.WriteHeader(header)
|
| 204 |
+
if err != nil {
|
| 205 |
+
return nil, err
|
| 206 |
+
}
|
| 207 |
+
headerSize = int(n)
|
| 208 |
+
keys := make([]segmentindex.Key, len(flat))
|
| 209 |
+
|
| 210 |
+
totalWritten := headerSize
|
| 211 |
+
for i, node := range flat {
|
| 212 |
+
segNode := &segmentReplaceNode{
|
| 213 |
+
offset: totalWritten,
|
| 214 |
+
tombstone: node.tombstone,
|
| 215 |
+
value: node.value,
|
| 216 |
+
primaryKey: node.key,
|
| 217 |
+
secondaryKeys: node.secondaryKeys,
|
| 218 |
+
secondaryIndexCount: m.secondaryIndices,
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
ki, err := segNode.KeyIndexAndWriteTo(f.BodyWriter())
|
| 222 |
+
if err != nil {
|
| 223 |
+
return nil, errors.Wrapf(err, "write node %d", i)
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
keys[i] = ki
|
| 227 |
+
totalWritten = ki.ValueEnd
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
return keys, nil
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
func (m *Memtable) flushDataSet(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) {
|
| 234 |
+
flat := m.keyMulti.flattenInOrder()
|
| 235 |
+
return m.flushDataCollection(f, flat)
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
func (m *Memtable) flushDataMap(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) {
|
| 239 |
+
m.RLock()
|
| 240 |
+
flat := m.keyMap.flattenInOrder()
|
| 241 |
+
m.RUnlock()
|
| 242 |
+
|
| 243 |
+
// by encoding each map pair we can force the same structure as for a
|
| 244 |
+
// collection, which means we can reuse the same flushing logic
|
| 245 |
+
asMulti := make([]*binarySearchNodeMulti, len(flat))
|
| 246 |
+
for i, mapNode := range flat {
|
| 247 |
+
asMulti[i] = &binarySearchNodeMulti{
|
| 248 |
+
key: mapNode.key,
|
| 249 |
+
values: make([]value, len(mapNode.values)),
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
for j := range asMulti[i].values {
|
| 253 |
+
enc, err := mapNode.values[j].Bytes()
|
| 254 |
+
if err != nil {
|
| 255 |
+
return nil, err
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
asMulti[i].values[j] = value{
|
| 259 |
+
value: enc,
|
| 260 |
+
tombstone: mapNode.values[j].Tombstone,
|
| 261 |
+
}
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
}
|
| 265 |
+
return m.flushDataCollection(f, asMulti)
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
func (m *Memtable) flushDataCollection(f *segmentindex.SegmentFile,
|
| 269 |
+
flat []*binarySearchNodeMulti,
|
| 270 |
+
) ([]segmentindex.Key, error) {
|
| 271 |
+
totalDataLength := totalValueSizeCollection(flat)
|
| 272 |
+
header := &segmentindex.Header{
|
| 273 |
+
IndexStart: uint64(totalDataLength + segmentindex.HeaderSize),
|
| 274 |
+
Level: 0, // always level zero on a new one
|
| 275 |
+
Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation),
|
| 276 |
+
SecondaryIndices: m.secondaryIndices,
|
| 277 |
+
Strategy: SegmentStrategyFromString(m.strategy),
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
n, err := f.WriteHeader(header)
|
| 281 |
+
if err != nil {
|
| 282 |
+
return nil, err
|
| 283 |
+
}
|
| 284 |
+
headerSize := int(n)
|
| 285 |
+
keys := make([]segmentindex.Key, len(flat))
|
| 286 |
+
|
| 287 |
+
totalWritten := headerSize
|
| 288 |
+
for i, node := range flat {
|
| 289 |
+
ki, err := (&segmentCollectionNode{
|
| 290 |
+
values: node.values,
|
| 291 |
+
primaryKey: node.key,
|
| 292 |
+
offset: totalWritten,
|
| 293 |
+
}).KeyIndexAndWriteTo(f.BodyWriter())
|
| 294 |
+
if err != nil {
|
| 295 |
+
return nil, errors.Wrapf(err, "write node %d", i)
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
keys[i] = ki
|
| 299 |
+
totalWritten = ki.ValueEnd
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
return keys, nil
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
func totalKeyAndValueSize(in []*binarySearchNode) int {
|
| 306 |
+
var sum int
|
| 307 |
+
for _, n := range in {
|
| 308 |
+
sum += len(n.value)
|
| 309 |
+
sum += len(n.key)
|
| 310 |
+
for _, sec := range n.secondaryKeys {
|
| 311 |
+
sum += len(sec)
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
return sum
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
func totalValueSizeCollection(in []*binarySearchNodeMulti) int {
|
| 319 |
+
var sum int
|
| 320 |
+
for _, n := range in {
|
| 321 |
+
sum += 8 // uint64 to indicate array length
|
| 322 |
+
for _, v := range n.values {
|
| 323 |
+
sum += 1 // bool to indicate value tombstone
|
| 324 |
+
sum += 8 // uint64 to indicate value length
|
| 325 |
+
sum += len(v.value)
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
sum += 4 // uint32 to indicate key size
|
| 329 |
+
sum += len(n.key)
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
return sum
|
| 333 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_inverted.go
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"bufio"
|
| 16 |
+
"bytes"
|
| 17 |
+
"encoding/binary"
|
| 18 |
+
"encoding/gob"
|
| 19 |
+
"fmt"
|
| 20 |
+
"math"
|
| 21 |
+
|
| 22 |
+
"github.com/prometheus/client_golang/prometheus"
|
| 23 |
+
"github.com/weaviate/sroar"
|
| 24 |
+
"github.com/weaviate/weaviate/adapters/repos/db/compactor"
|
| 25 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 26 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc"
|
| 27 |
+
"github.com/weaviate/weaviate/entities/diskio"
|
| 28 |
+
"github.com/weaviate/weaviate/usecases/config"
|
| 29 |
+
"github.com/weaviate/weaviate/usecases/monitoring"
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
func (m *Memtable) flushDataInverted(f *segmentindex.SegmentFile, ogF *diskio.MeteredWriter, bufw *bufio.Writer) ([]segmentindex.Key, *sroar.Bitmap, error) {
|
| 33 |
+
m.RLock()
|
| 34 |
+
flatA := m.keyMap.flattenInOrder()
|
| 35 |
+
m.RUnlock()
|
| 36 |
+
|
| 37 |
+
// by encoding each map pair we can force the same structure as for a
|
| 38 |
+
// collection, which means we can reuse the same flushing logic
|
| 39 |
+
flat := make([]*binarySearchNodeMap, len(flatA))
|
| 40 |
+
|
| 41 |
+
actuallyWritten := 0
|
| 42 |
+
actuallyWrittenKeys := make(map[string]struct{})
|
| 43 |
+
tombstones := m.tombstones
|
| 44 |
+
|
| 45 |
+
docIdsLengths := make(map[uint64]uint32)
|
| 46 |
+
propLengthSum := uint64(0)
|
| 47 |
+
propLengthCount := uint64(0)
|
| 48 |
+
|
| 49 |
+
for i, mapNode := range flatA {
|
| 50 |
+
flat[i] = &binarySearchNodeMap{
|
| 51 |
+
key: mapNode.key,
|
| 52 |
+
values: make([]MapPair, 0, len(mapNode.values)),
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
for j := range mapNode.values {
|
| 56 |
+
docId := binary.BigEndian.Uint64(mapNode.values[j].Key)
|
| 57 |
+
if !mapNode.values[j].Tombstone {
|
| 58 |
+
fieldLength := math.Float32frombits(binary.LittleEndian.Uint32(mapNode.values[j].Value[4:]))
|
| 59 |
+
flat[i].values = append(flat[i].values, mapNode.values[j])
|
| 60 |
+
actuallyWritten++
|
| 61 |
+
actuallyWrittenKeys[string(mapNode.key)] = struct{}{}
|
| 62 |
+
if _, ok := docIdsLengths[docId]; !ok {
|
| 63 |
+
propLengthSum += uint64(fieldLength)
|
| 64 |
+
propLengthCount++
|
| 65 |
+
}
|
| 66 |
+
docIdsLengths[docId] = uint32(fieldLength)
|
| 67 |
+
} else {
|
| 68 |
+
tombstones.Set(docId)
|
| 69 |
+
}
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
// weighted average of m.averagePropLength and the average of the current flush
|
| 75 |
+
// averaged by propLengthCount and m.propLengthCount
|
| 76 |
+
if m.averagePropLength == 0 {
|
| 77 |
+
m.averagePropLength = float64(propLengthSum) / float64(propLengthCount)
|
| 78 |
+
m.propLengthCount = propLengthCount
|
| 79 |
+
} else {
|
| 80 |
+
m.averagePropLength = (m.averagePropLength*float64(m.propLengthCount) + float64(propLengthSum)) / float64(m.propLengthCount+propLengthCount)
|
| 81 |
+
m.propLengthCount += propLengthCount
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
tombstoneBuffer := make([]byte, 0)
|
| 85 |
+
if !tombstones.IsEmpty() {
|
| 86 |
+
tombstoneBuffer = tombstones.ToBuffer()
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
header := segmentindex.Header{
|
| 90 |
+
Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation),
|
| 91 |
+
IndexStart: 0, // will be updated later
|
| 92 |
+
Level: 0, // always level zero on a new one
|
| 93 |
+
SecondaryIndices: m.secondaryIndices,
|
| 94 |
+
Strategy: SegmentStrategyFromString(StrategyInverted),
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
headerInverted := segmentindex.HeaderInverted{
|
| 98 |
+
KeysOffset: uint64(segmentindex.HeaderSize + segmentindex.SegmentInvertedDefaultHeaderSize + segmentindex.SegmentInvertedDefaultFieldCount),
|
| 99 |
+
TombstoneOffset: 0,
|
| 100 |
+
PropertyLengthsOffset: 0,
|
| 101 |
+
Version: 0,
|
| 102 |
+
BlockSize: uint8(segmentindex.SegmentInvertedDefaultBlockSize),
|
| 103 |
+
DataFieldCount: uint8(segmentindex.SegmentInvertedDefaultFieldCount),
|
| 104 |
+
DataFields: []varenc.VarEncDataType{varenc.DeltaVarIntUint64, varenc.VarIntUint64},
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
docIdEncoder := varenc.GetVarEncEncoder64(headerInverted.DataFields[0])
|
| 108 |
+
tfEncoder := varenc.GetVarEncEncoder64(headerInverted.DataFields[1])
|
| 109 |
+
docIdEncoder.Init(segmentindex.SegmentInvertedDefaultBlockSize)
|
| 110 |
+
tfEncoder.Init(segmentindex.SegmentInvertedDefaultBlockSize)
|
| 111 |
+
|
| 112 |
+
headerEmpty := make([]byte, headerInverted.KeysOffset)
|
| 113 |
+
if _, err := bufw.Write(headerEmpty); err != nil {
|
| 114 |
+
return nil, nil, err
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
totalWritten := len(headerEmpty)
|
| 118 |
+
keysStartOffset := totalWritten
|
| 119 |
+
|
| 120 |
+
buf := make([]byte, 8)
|
| 121 |
+
|
| 122 |
+
keys := make([]segmentindex.Key, len(flat))
|
| 123 |
+
actuallyWritten = 0
|
| 124 |
+
|
| 125 |
+
bw := f.BodyWriter()
|
| 126 |
+
if bw == nil {
|
| 127 |
+
return nil, nil, fmt.Errorf("segment file body writer is nil, cannot write inverted index")
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
for _, mapNode := range flat {
|
| 131 |
+
if len(mapNode.values) > 0 {
|
| 132 |
+
|
| 133 |
+
ki := segmentindex.Key{
|
| 134 |
+
Key: mapNode.key,
|
| 135 |
+
ValueStart: totalWritten,
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
b := config.DefaultBM25b
|
| 139 |
+
k1 := config.DefaultBM25k1
|
| 140 |
+
if m.bm25config != nil {
|
| 141 |
+
b = m.bm25config.B
|
| 142 |
+
k1 = m.bm25config.K1
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
blocksEncoded, _ := createAndEncodeBlocksWithLengths(mapNode.values, docIdEncoder, tfEncoder, float64(b), float64(k1), m.averagePropLength)
|
| 146 |
+
|
| 147 |
+
if _, err := bw.Write(blocksEncoded); err != nil {
|
| 148 |
+
return nil, nil, err
|
| 149 |
+
}
|
| 150 |
+
totalWritten += len(blocksEncoded)
|
| 151 |
+
|
| 152 |
+
// write key length
|
| 153 |
+
binary.LittleEndian.PutUint32(buf, uint32(len(mapNode.key)))
|
| 154 |
+
if _, err := bw.Write(buf[:4]); err != nil {
|
| 155 |
+
return nil, nil, err
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
totalWritten += 4
|
| 159 |
+
|
| 160 |
+
// write key
|
| 161 |
+
if _, err := bw.Write(mapNode.key); err != nil {
|
| 162 |
+
return nil, nil, err
|
| 163 |
+
}
|
| 164 |
+
totalWritten += len(mapNode.key)
|
| 165 |
+
|
| 166 |
+
ki.ValueEnd = totalWritten
|
| 167 |
+
|
| 168 |
+
keys[actuallyWritten] = ki
|
| 169 |
+
actuallyWritten++
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
tombstoneOffset := totalWritten
|
| 174 |
+
|
| 175 |
+
binary.LittleEndian.PutUint64(buf, uint64(len(tombstoneBuffer)))
|
| 176 |
+
if _, err := bw.Write(buf); err != nil {
|
| 177 |
+
return nil, nil, err
|
| 178 |
+
}
|
| 179 |
+
totalWritten += 8
|
| 180 |
+
|
| 181 |
+
if _, err := bw.Write(tombstoneBuffer); err != nil {
|
| 182 |
+
return nil, nil, err
|
| 183 |
+
}
|
| 184 |
+
totalWritten += len(tombstoneBuffer)
|
| 185 |
+
propLengthsOffset := totalWritten
|
| 186 |
+
|
| 187 |
+
b := new(bytes.Buffer)
|
| 188 |
+
|
| 189 |
+
propLengthAvg := float64(propLengthSum) / float64(propLengthCount)
|
| 190 |
+
|
| 191 |
+
binary.LittleEndian.PutUint64(buf, math.Float64bits(propLengthAvg))
|
| 192 |
+
if _, err := bw.Write(buf); err != nil {
|
| 193 |
+
return nil, nil, err
|
| 194 |
+
}
|
| 195 |
+
totalWritten += 8
|
| 196 |
+
|
| 197 |
+
binary.LittleEndian.PutUint64(buf, propLengthCount)
|
| 198 |
+
if _, err := bw.Write(buf); err != nil {
|
| 199 |
+
return nil, nil, err
|
| 200 |
+
}
|
| 201 |
+
totalWritten += 8
|
| 202 |
+
|
| 203 |
+
e := gob.NewEncoder(b)
|
| 204 |
+
|
| 205 |
+
// Encoding the map
|
| 206 |
+
err := e.Encode(docIdsLengths)
|
| 207 |
+
if err != nil {
|
| 208 |
+
return nil, nil, err
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
binary.LittleEndian.PutUint64(buf, uint64(b.Len()))
|
| 212 |
+
if _, err := bw.Write(buf); err != nil {
|
| 213 |
+
return nil, nil, err
|
| 214 |
+
}
|
| 215 |
+
totalWritten += 8
|
| 216 |
+
|
| 217 |
+
if _, err := bw.Write(b.Bytes()); err != nil {
|
| 218 |
+
return nil, nil, err
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
totalWritten += b.Len()
|
| 222 |
+
|
| 223 |
+
treeOffset := totalWritten
|
| 224 |
+
|
| 225 |
+
header.IndexStart = uint64(treeOffset)
|
| 226 |
+
|
| 227 |
+
headerInverted.KeysOffset = uint64(keysStartOffset)
|
| 228 |
+
headerInverted.TombstoneOffset = uint64(tombstoneOffset)
|
| 229 |
+
headerInverted.PropertyLengthsOffset = uint64(propLengthsOffset)
|
| 230 |
+
|
| 231 |
+
f.SetHeader(&header)
|
| 232 |
+
f.SetHeaderInverted(&headerInverted)
|
| 233 |
+
|
| 234 |
+
indexes := &segmentindex.Indexes{
|
| 235 |
+
Keys: keys,
|
| 236 |
+
SecondaryIndexCount: m.secondaryIndices,
|
| 237 |
+
ScratchSpacePath: m.path + ".scratch.d",
|
| 238 |
+
ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{
|
| 239 |
+
"strategy": m.strategy,
|
| 240 |
+
"operation": "writeIndices",
|
| 241 |
+
}),
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
if _, err := f.WriteIndexes(indexes, int64(m.size)); err != nil {
|
| 245 |
+
return nil, nil, err
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
// flush buffered, so we can safely seek on underlying writer
|
| 249 |
+
|
| 250 |
+
if err := bufw.Flush(); err != nil {
|
| 251 |
+
return nil, nil, fmt.Errorf("flush buffered: %w", err)
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
version := segmentindex.ChooseHeaderVersion(m.enableChecksumValidation)
|
| 255 |
+
if err := compactor.WriteHeaders(nil, ogF, bufw, f, 0, version,
|
| 256 |
+
header.SecondaryIndices, header.IndexStart, segmentindex.StrategyInverted, &headerInverted); err != nil {
|
| 257 |
+
return nil, nil, fmt.Errorf("write headers: %w", err)
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
return keys[:actuallyWritten], tombstones, nil
|
| 261 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set.go
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"fmt"
|
| 16 |
+
|
| 17 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 18 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
func (m *Memtable) flushDataRoaringSet(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) {
|
| 22 |
+
flat := m.roaringSet.FlattenInOrder()
|
| 23 |
+
|
| 24 |
+
totalDataLength := totalPayloadSizeRoaringSet(flat)
|
| 25 |
+
header := &segmentindex.Header{
|
| 26 |
+
IndexStart: uint64(totalDataLength + segmentindex.HeaderSize),
|
| 27 |
+
Level: 0, // always level zero on a new one
|
| 28 |
+
Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation),
|
| 29 |
+
SecondaryIndices: 0,
|
| 30 |
+
Strategy: segmentindex.StrategyRoaringSet,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
n, err := f.WriteHeader(header)
|
| 34 |
+
if err != nil {
|
| 35 |
+
return nil, err
|
| 36 |
+
}
|
| 37 |
+
headerSize := int(n)
|
| 38 |
+
keys := make([]segmentindex.Key, len(flat))
|
| 39 |
+
|
| 40 |
+
totalWritten := headerSize
|
| 41 |
+
for i, node := range flat {
|
| 42 |
+
sn, err := roaringset.NewSegmentNode(node.Key, node.Value.Additions,
|
| 43 |
+
node.Value.Deletions)
|
| 44 |
+
if err != nil {
|
| 45 |
+
return nil, fmt.Errorf("create segment node: %w", err)
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
ki, err := sn.KeyIndexAndWriteTo(f.BodyWriter(), totalWritten)
|
| 49 |
+
if err != nil {
|
| 50 |
+
return nil, fmt.Errorf("write node %d: %w", i, err)
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
keys[i] = ki
|
| 54 |
+
totalWritten = ki.ValueEnd
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
return keys, nil
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
func totalPayloadSizeRoaringSet(in []*roaringset.BinarySearchNode) int {
|
| 61 |
+
var sum int
|
| 62 |
+
for _, n := range in {
|
| 63 |
+
sum += 8 // uint64 to segment length
|
| 64 |
+
sum += 8 // uint64 to indicate length of additions bitmap
|
| 65 |
+
sum += len(n.Value.Additions.ToBuffer())
|
| 66 |
+
sum += 8 // uint64 to indicate length of deletions bitmap
|
| 67 |
+
sum += len(n.Value.Deletions.ToBuffer())
|
| 68 |
+
sum += 4 // uint32 to indicate key size
|
| 69 |
+
sum += len(n.Key)
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
return sum
|
| 73 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set_range.go
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"fmt"
|
| 16 |
+
|
| 17 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 18 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
func (m *Memtable) flushDataRoaringSetRange(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) {
|
| 22 |
+
nodes := m.roaringSetRange.Nodes()
|
| 23 |
+
|
| 24 |
+
totalDataLength := totalPayloadSizeRoaringSetRange(nodes)
|
| 25 |
+
header := &segmentindex.Header{
|
| 26 |
+
IndexStart: uint64(totalDataLength + segmentindex.HeaderSize),
|
| 27 |
+
Level: 0, // always level zero on a new one
|
| 28 |
+
Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation),
|
| 29 |
+
SecondaryIndices: 0,
|
| 30 |
+
Strategy: segmentindex.StrategyRoaringSetRange,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
_, err := f.WriteHeader(header)
|
| 34 |
+
if err != nil {
|
| 35 |
+
return nil, err
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
for i, node := range nodes {
|
| 39 |
+
sn, err := roaringsetrange.NewSegmentNode(node.Key, node.Additions, node.Deletions)
|
| 40 |
+
if err != nil {
|
| 41 |
+
return nil, fmt.Errorf("create segment node: %w", err)
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
_, err = f.BodyWriter().Write(sn.ToBuffer())
|
| 45 |
+
if err != nil {
|
| 46 |
+
return nil, fmt.Errorf("write segment node %d: %w", i, err)
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
return make([]segmentindex.Key, 0), nil
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
func totalPayloadSizeRoaringSetRange(nodes []*roaringsetrange.MemtableNode) int {
|
| 54 |
+
var sum int
|
| 55 |
+
for _, node := range nodes {
|
| 56 |
+
sum += 8 // uint64 to segment length
|
| 57 |
+
sum += 1 // key (fixed size)
|
| 58 |
+
sum += 8 // uint64 to indicate length of additions bitmap
|
| 59 |
+
sum += len(node.Additions.ToBuffer())
|
| 60 |
+
|
| 61 |
+
if node.Key == 0 {
|
| 62 |
+
sum += 8 // uint64 to indicate length of deletions bitmap
|
| 63 |
+
sum += len(node.Deletions.ToBuffer())
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
return sum
|
| 68 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_metrics.go
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
type memtableMetrics struct {
|
| 15 |
+
put NsObserver
|
| 16 |
+
setTombstone NsObserver
|
| 17 |
+
append NsObserver
|
| 18 |
+
appendMapSorted NsObserver
|
| 19 |
+
get NsObserver
|
| 20 |
+
getBySecondary NsObserver
|
| 21 |
+
getMap NsObserver
|
| 22 |
+
getCollection NsObserver
|
| 23 |
+
size Setter
|
| 24 |
+
writeMemtable BytesWriteObserver
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
// newMemtableMetrics curries the prometheus-functions just once to make sure
|
| 28 |
+
// they don't have to be curried on the hotpath where we this would lead to a
|
| 29 |
+
// lot of allocations.
|
| 30 |
+
func newMemtableMetrics(metrics *Metrics, path, strategy string) *memtableMetrics {
|
| 31 |
+
return &memtableMetrics{
|
| 32 |
+
put: metrics.MemtableOpObserver(path, strategy, "put"),
|
| 33 |
+
setTombstone: metrics.MemtableOpObserver(path, strategy, "setTombstone"),
|
| 34 |
+
append: metrics.MemtableOpObserver(path, strategy, "append"),
|
| 35 |
+
appendMapSorted: metrics.MemtableOpObserver(path, strategy, "appendMapSorted"),
|
| 36 |
+
get: metrics.MemtableOpObserver(path, strategy, "get"),
|
| 37 |
+
getBySecondary: metrics.MemtableOpObserver(path, strategy, "getBySecondary"),
|
| 38 |
+
getMap: metrics.MemtableOpObserver(path, strategy, "getMap"),
|
| 39 |
+
getCollection: metrics.MemtableOpObserver(path, strategy, "getCollection"),
|
| 40 |
+
size: metrics.MemtableSizeSetter(path, strategy),
|
| 41 |
+
writeMemtable: metrics.MemtableWriteObserver(strategy, "flushMemtable"),
|
| 42 |
+
}
|
| 43 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set.go
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"github.com/pkg/errors"
|
| 16 |
+
"github.com/weaviate/sroar"
|
| 17 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
func (m *Memtable) roaringSetAddOne(key []byte, value uint64) error {
|
| 21 |
+
return m.roaringSetAddList(key, []uint64{value})
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
func (m *Memtable) roaringSetAddList(key []byte, values []uint64) error {
|
| 25 |
+
if err := CheckStrategyRoaringSet(m.strategy); err != nil {
|
| 26 |
+
return err
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
m.Lock()
|
| 30 |
+
defer m.Unlock()
|
| 31 |
+
|
| 32 |
+
if err := m.roaringSetAddCommitLog(key, values, []uint64{}); err != nil {
|
| 33 |
+
return err
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
m.roaringSet.Insert(key, roaringset.Insert{Additions: values})
|
| 37 |
+
|
| 38 |
+
m.roaringSetAdjustMeta(len(values))
|
| 39 |
+
return nil
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
func (m *Memtable) roaringSetAddBitmap(key []byte, bm *sroar.Bitmap) error {
|
| 43 |
+
if err := CheckStrategyRoaringSet(m.strategy); err != nil {
|
| 44 |
+
return err
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
m.Lock()
|
| 48 |
+
defer m.Unlock()
|
| 49 |
+
|
| 50 |
+
if err := m.roaringSetAddCommitLog(key, bm.ToArray(), []uint64{}); err != nil {
|
| 51 |
+
return err
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
m.roaringSet.Insert(key, roaringset.Insert{Additions: bm.ToArray()})
|
| 55 |
+
|
| 56 |
+
m.roaringSetAdjustMeta(bm.GetCardinality())
|
| 57 |
+
return nil
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
func (m *Memtable) roaringSetRemoveOne(key []byte, value uint64) error {
|
| 61 |
+
return m.roaringSetRemoveList(key, []uint64{value})
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
func (m *Memtable) roaringSetRemoveList(key []byte, values []uint64) error {
|
| 65 |
+
if err := CheckStrategyRoaringSet(m.strategy); err != nil {
|
| 66 |
+
return err
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
m.Lock()
|
| 70 |
+
defer m.Unlock()
|
| 71 |
+
|
| 72 |
+
if err := m.roaringSetAddCommitLog(key, []uint64{}, values); err != nil {
|
| 73 |
+
return err
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
m.roaringSet.Insert(key, roaringset.Insert{Deletions: values})
|
| 77 |
+
|
| 78 |
+
m.roaringSetAdjustMeta(len(values))
|
| 79 |
+
return nil
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
func (m *Memtable) roaringSetRemoveBitmap(key []byte, bm *sroar.Bitmap) error {
|
| 83 |
+
if err := CheckStrategyRoaringSet(m.strategy); err != nil {
|
| 84 |
+
return err
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
m.Lock()
|
| 88 |
+
defer m.Unlock()
|
| 89 |
+
|
| 90 |
+
if err := m.roaringSetAddCommitLog(key, []uint64{}, bm.ToArray()); err != nil {
|
| 91 |
+
return err
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
m.roaringSet.Insert(key, roaringset.Insert{Deletions: bm.ToArray()})
|
| 95 |
+
|
| 96 |
+
m.roaringSetAdjustMeta(bm.GetCardinality())
|
| 97 |
+
return nil
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
func (m *Memtable) roaringSetAddRemoveSlices(key []byte, additions []uint64, deletions []uint64) error {
|
| 101 |
+
if err := CheckStrategyRoaringSet(m.strategy); err != nil {
|
| 102 |
+
return err
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
m.Lock()
|
| 106 |
+
defer m.Unlock()
|
| 107 |
+
|
| 108 |
+
if err := m.roaringSetAddCommitLog(key, additions, deletions); err != nil {
|
| 109 |
+
return err
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
m.roaringSet.Insert(key, roaringset.Insert{
|
| 113 |
+
Additions: additions,
|
| 114 |
+
Deletions: deletions,
|
| 115 |
+
})
|
| 116 |
+
|
| 117 |
+
m.roaringSetAdjustMeta(len(additions) + len(deletions))
|
| 118 |
+
return nil
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
// returned bitmaps are cloned and safe to mutate
|
| 122 |
+
func (m *Memtable) roaringSetGet(key []byte) (roaringset.BitmapLayer, error) {
|
| 123 |
+
if err := CheckStrategyRoaringSet(m.strategy); err != nil {
|
| 124 |
+
return roaringset.BitmapLayer{}, err
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
m.RLock()
|
| 128 |
+
defer m.RUnlock()
|
| 129 |
+
|
| 130 |
+
return m.roaringSet.Get(key)
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
func (m *Memtable) roaringSetAdjustMeta(entriesChanged int) {
|
| 134 |
+
// in the worst case roaring bitmaps take 2 bytes per entry. A reasonable
|
| 135 |
+
// estimation is therefore to take the changed entries and multiply them by
|
| 136 |
+
// 2.
|
| 137 |
+
m.size += uint64(entriesChanged * 2)
|
| 138 |
+
m.metrics.size(m.size)
|
| 139 |
+
m.updateDirtyAt()
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
func (m *Memtable) roaringSetAddCommitLog(key []byte, additions []uint64, deletions []uint64) error {
|
| 143 |
+
if node, err := roaringset.NewSegmentNodeList(key, additions, deletions); err != nil {
|
| 144 |
+
return errors.Wrap(err, "create node for commit log")
|
| 145 |
+
} else if err := m.commitlog.add(node); err != nil {
|
| 146 |
+
return errors.Wrap(err, "add node to commit log")
|
| 147 |
+
}
|
| 148 |
+
return nil
|
| 149 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range.go
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
|
| 17 |
+
"github.com/pkg/errors"
|
| 18 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
func (m *Memtable) roaringSetRangeAdd(key uint64, values ...uint64) error {
|
| 22 |
+
if err := CheckStrategyRoaringSetRange(m.strategy); err != nil {
|
| 23 |
+
return err
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
m.Lock()
|
| 27 |
+
defer m.Unlock()
|
| 28 |
+
|
| 29 |
+
if err := m.roaringSetRangeAddCommitLog(key, values, nil); err != nil {
|
| 30 |
+
return err
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
m.roaringSetRange.Insert(key, values)
|
| 34 |
+
|
| 35 |
+
m.roaringSetRangeAdjustMeta(len(values))
|
| 36 |
+
return nil
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
func (m *Memtable) roaringSetRangeRemove(key uint64, values ...uint64) error {
|
| 40 |
+
if err := CheckStrategyRoaringSetRange(m.strategy); err != nil {
|
| 41 |
+
return err
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
m.Lock()
|
| 45 |
+
defer m.Unlock()
|
| 46 |
+
|
| 47 |
+
if err := m.roaringSetRangeAddCommitLog(key, nil, values); err != nil {
|
| 48 |
+
return err
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
m.roaringSetRange.Delete(key, values)
|
| 52 |
+
|
| 53 |
+
m.roaringSetRangeAdjustMeta(len(values))
|
| 54 |
+
return nil
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
func (m *Memtable) roaringSetRangeAddRemove(key uint64, additions []uint64, deletions []uint64) error {
|
| 58 |
+
if err := CheckStrategyRoaringSetRange(m.strategy); err != nil {
|
| 59 |
+
return err
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
m.Lock()
|
| 63 |
+
defer m.Unlock()
|
| 64 |
+
|
| 65 |
+
if err := m.roaringSetRangeAddCommitLog(key, additions, deletions); err != nil {
|
| 66 |
+
return err
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
m.roaringSetRange.Delete(key, deletions)
|
| 70 |
+
m.roaringSetRange.Insert(key, additions)
|
| 71 |
+
|
| 72 |
+
m.roaringSetRangeAdjustMeta(len(additions) + len(deletions))
|
| 73 |
+
return nil
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// roaringSetRangeAdjustMeta updates the memtable's size estimation, metrics,
// and dirty timestamp after entriesChanged entries were inserted or deleted.
func (m *Memtable) roaringSetRangeAdjustMeta(entriesChanged int) {
	// TODO roaring-set-range new estimations

	// in the worst case roaring bitmaps take 2 bytes per entry. A reasonable
	// estimation is therefore to take the changed entries and multiply them by
	// 2.
	m.size += uint64(entriesChanged * 2)
	m.metrics.size(m.size)
	m.updateDirtyAt()
}
|
| 86 |
+
|
| 87 |
+
func (m *Memtable) roaringSetRangeAddCommitLog(key uint64, additions []uint64, deletions []uint64) error {
|
| 88 |
+
// TODO roaring-set-range improved commit log
|
| 89 |
+
|
| 90 |
+
keyBuf := make([]byte, 8)
|
| 91 |
+
binary.BigEndian.PutUint64(keyBuf, key)
|
| 92 |
+
if node, err := roaringset.NewSegmentNodeList(keyBuf, additions, deletions); err != nil {
|
| 93 |
+
return errors.Wrap(err, "create node for commit log")
|
| 94 |
+
} else if err := m.commitlog.add(node); err != nil {
|
| 95 |
+
return errors.Wrap(err, "add node to commit log")
|
| 96 |
+
}
|
| 97 |
+
return nil
|
| 98 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range_test.go
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"context"
|
| 16 |
+
"math/rand"
|
| 17 |
+
"path"
|
| 18 |
+
"testing"
|
| 19 |
+
"time"
|
| 20 |
+
|
| 21 |
+
"github.com/sirupsen/logrus/hooks/test"
|
| 22 |
+
"github.com/stretchr/testify/assert"
|
| 23 |
+
"github.com/stretchr/testify/require"
|
| 24 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 25 |
+
"github.com/weaviate/weaviate/entities/filters"
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
// TestMemtableRoaringSetRange populates a roaring-set-range memtable, creates
// a reader, then mutates the memtable from a concurrent goroutine while
// repeatedly reading — asserting the reader's results stay stable.
func TestMemtableRoaringSetRange(t *testing.T) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	logger, _ := test.NewNullLogger()
	memPath := func() string {
		return path.Join(t.TempDir(), "memtable")
	}

	t.Run("concurrent writes and search", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSetRange, 0)
		require.NoError(t, err)
		m, err := newMemtable(memPath(), StrategyRoaringSetRange, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		// each key k carries the three values k+1000, k+2000, k+3000
		addKeyVals := func(k uint64) error {
			return m.roaringSetRangeAdd(k, k+1000, k+2000, k+3000)
		}
		removeKeyVals := func(k uint64) error {
			return m.roaringSetRangeRemove(k, k+1000, k+2000, k+3000)
		}
		// expected results for keys 0..9: even keys added, odd keys removed
		assertRead_LTE4_GT7 := func(t *testing.T, reader roaringsetrange.InnerReader) {
			expAddLTE4 := []uint64{
				1000, 2000, 3000,
				1002, 2002, 3002,
				1004, 2004, 3004,
			}
			expAddGT7 := []uint64{
				1008, 2008, 3008,
			}
			expDel := []uint64{
				1000, 2000, 3000,
				1001, 2001, 3001,
				1002, 2002, 3002,
				1003, 2003, 3003,
				1004, 2004, 3004,
				1005, 2005, 3005,
				1006, 2006, 3006,
				1007, 2007, 3007,
				1008, 2008, 3008,
				1009, 2009, 3009,
			}

			layerLTE4, release, err := reader.Read(context.Background(), 4, filters.OperatorLessThanEqual)
			require.NoError(t, err)
			defer release()

			layerGT7, release, err := reader.Read(context.Background(), 7, filters.OperatorGreaterThan)
			require.NoError(t, err)
			defer release()

			assert.ElementsMatch(t, expAddLTE4, layerLTE4.Additions.ToArray())
			assert.ElementsMatch(t, expDel, layerLTE4.Deletions.ToArray())
			assert.ElementsMatch(t, expAddGT7, layerGT7.Additions.ToArray())
			assert.ElementsMatch(t, expDel, layerGT7.Deletions.ToArray())
		}

		// populate with initial data
		for i := uint64(0); i < 10; i = i + 2 {
			assert.NoError(t, addKeyVals(i))
		}
		for i := uint64(1); i < 10; i = i + 2 {
			assert.NoError(t, removeKeyVals(i))
		}

		// create reader
		reader := m.newRoaringSetRangeReader()

		// assert data
		assertRead_LTE4_GT7(t, reader)

		// concurrently mutate memtable with keys >= 0 (errors intentionally
		// ignored; the goroutine only exists to create write contention)
		chStart := make(chan struct{})
		chFinish := make(chan struct{})
		go func() {
			chStart <- struct{}{}
			for {
				select {
				case <-chFinish:
					return
				default:
					addKeyVals(uint64(rnd.Int31n(1000)))
					removeKeyVals(uint64(rnd.Int31n(1000)))
				}
			}
		}()

		// assert search results do not contain mutated data
		<-chStart
		for i := 0; i < 256; i++ {
			assertRead_LTE4_GT7(t, reader)
		}
		chFinish <- struct{}{}
	})
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_test.go
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"path"
|
| 16 |
+
"testing"
|
| 17 |
+
|
| 18 |
+
"github.com/sirupsen/logrus/hooks/test"
|
| 19 |
+
"github.com/stretchr/testify/assert"
|
| 20 |
+
"github.com/stretchr/testify/require"
|
| 21 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
// TestMemtableRoaringSet covers every write path of the roaring-set memtable
// strategy (single entries, lists, bitmaps, and combined add/remove slices)
// and verifies the resulting additions/deletions layers via roaringSetGet.
func TestMemtableRoaringSet(t *testing.T) {
	logger, _ := test.NewNullLogger()
	// fresh path per subtest so each commit logger starts empty
	memPath := func() string {
		return path.Join(t.TempDir(), "fake")
	}

	t.Run("inserting individual entries", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetAddOne(key1, 1))
		assert.Nil(t, m.roaringSetAddOne(key1, 2))
		assert.Nil(t, m.roaringSetAddOne(key2, 3))
		assert.Nil(t, m.roaringSetAddOne(key2, 4))
		assert.Greater(t, m.Size(), uint64(0))

		// each key only contains its own values
		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.False(t, setKey1.Additions.Contains(3))
		assert.False(t, setKey1.Additions.Contains(4))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(1))
		assert.False(t, setKey2.Additions.Contains(2))
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("inserting lists", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetAddList(key1, []uint64{1, 2}))
		assert.Nil(t, m.roaringSetAddList(key2, []uint64{3, 4}))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.False(t, setKey1.Additions.Contains(3))
		assert.False(t, setKey1.Additions.Contains(4))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(1))
		assert.False(t, setKey2.Additions.Contains(2))
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("inserting bitmaps", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		bm1 := roaringset.NewBitmap(1, 2)
		assert.Nil(t, m.roaringSetAddBitmap(key1, bm1))
		bm2 := roaringset.NewBitmap(3, 4)
		assert.Nil(t, m.roaringSetAddBitmap(key2, bm2))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.False(t, setKey1.Additions.Contains(3))
		assert.False(t, setKey1.Additions.Contains(4))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(1))
		assert.False(t, setKey2.Additions.Contains(2))
		assert.True(t, setK2Additions := setKey2.Additions; setKey2Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("removing individual entries", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		// removing from an empty set records the value in the deletions layer
		assert.Nil(t, m.roaringSetRemoveOne(key1, 7))
		assert.Nil(t, m.roaringSetRemoveOne(key2, 8))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.False(t, setKey1.Additions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(7))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(8))
		assert.True(t, setKey2.Deletions.Contains(8))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("removing lists", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetRemoveList(key1, []uint64{7, 8}))
		assert.Nil(t, m.roaringSetRemoveList(key2, []uint64{9, 10}))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey1.Additions.GetCardinality())
		assert.Equal(t, 2, setKey1.Deletions.GetCardinality())
		assert.True(t, setKey1.Deletions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(8))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey2.Additions.GetCardinality())
		assert.Equal(t, 2, setKey2.Deletions.GetCardinality())
		assert.True(t, setKey2.Deletions.Contains(9))
		assert.True(t, setKey2.Deletions.Contains(10))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("removing bitmaps", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetRemoveBitmap(key1, roaringset.NewBitmap(7, 8)))
		assert.Nil(t, m.roaringSetRemoveBitmap(key2, roaringset.NewBitmap(9, 10)))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey1.Additions.GetCardinality())
		assert.Equal(t, 2, setKey1.Deletions.GetCardinality())
		assert.True(t, setKey1.Deletions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(8))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey2.Additions.GetCardinality())
		assert.Equal(t, 2, setKey2.Deletions.GetCardinality())
		assert.True(t, setKey2.Deletions.Contains(9))
		assert.True(t, setKey2.Deletions.Contains(10))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("adding/removing slices", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetAddRemoveSlices(key1,
			[]uint64{1, 2}, []uint64{7, 8}))
		assert.Nil(t, m.roaringSetAddRemoveSlices(key2,
			[]uint64{3, 4}, []uint64{9, 10}))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.Equal(t, 2, setKey1.Additions.GetCardinality())
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.Equal(t, 2, setKey1.Deletions.GetCardinality())
		assert.True(t, setKey1.Deletions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(8))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.Equal(t, 2, setKey2.Additions.GetCardinality())
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))
		assert.Equal(t, 2, setKey2.Deletions.GetCardinality())
		assert.True(t, setKey2.Deletions.Contains(9))
		assert.True(t, setKey2.Deletions.Contains(10))

		require.Nil(t, m.commitlog.close())
	})
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_size_advisor.go
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import "time"
|
| 15 |
+
|
| 16 |
+
// if not enough config is provided we can fall back to this reasonable default
|
| 17 |
+
// value
|
| 18 |
+
const reasonableMemtableDefault = 10 * 1024 * 1024
|
| 19 |
+
|
| 20 |
+
// memtableSizeAdvisorCfg configures dynamic memtable size targeting.
type memtableSizeAdvisorCfg struct {
	initial     int           // starting size target (bytes); also the lower bound when shrinking
	stepSize    int           // amount (bytes) to grow or shrink per adjustment
	maxSize     int           // upper bound (bytes) for the size target
	minDuration time.Duration // flush cycles shorter than this trigger growth
	maxDuration time.Duration // flush cycles longer than this trigger shrinking
}
|
| 27 |
+
|
| 28 |
+
// memtableSizeAdvisor suggests memtable size targets based on flush cadence.
// When inactive (incomplete config) it always falls back to a static default.
type memtableSizeAdvisor struct {
	cfg memtableSizeAdvisorCfg
	// active is true only if initial, stepSize, maxSize, and maxDuration are
	// all provided (see newMemtableSizeAdvisor)
	active bool
}
|
| 32 |
+
|
| 33 |
+
func newMemtableSizeAdvisor(cfg memtableSizeAdvisorCfg) *memtableSizeAdvisor {
|
| 34 |
+
a := &memtableSizeAdvisor{
|
| 35 |
+
cfg: cfg,
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
// only activate if initial size, step size, max size, and max duration are
|
| 39 |
+
// given
|
| 40 |
+
if a.cfg.maxSize > 0 && a.cfg.initial > 0 && a.cfg.stepSize > 0 && a.cfg.maxDuration > 0 {
|
| 41 |
+
a.active = true
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
return a
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
func (m memtableSizeAdvisor) Initial() int {
|
| 48 |
+
if m.active {
|
| 49 |
+
return m.cfg.initial
|
| 50 |
+
} else {
|
| 51 |
+
return reasonableMemtableDefault
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
func (m memtableSizeAdvisor) NextTarget(previousTarget int,
|
| 56 |
+
timeSinceFlush time.Duration,
|
| 57 |
+
) (int, bool) {
|
| 58 |
+
if !m.active {
|
| 59 |
+
return reasonableMemtableDefault, false
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
if timeSinceFlush < m.cfg.minDuration {
|
| 63 |
+
next := min(previousTarget+m.cfg.stepSize, m.cfg.maxSize)
|
| 64 |
+
return next, next != previousTarget
|
| 65 |
+
}
|
| 66 |
+
if timeSinceFlush > m.cfg.maxDuration {
|
| 67 |
+
next := max(previousTarget-m.cfg.stepSize, m.cfg.initial)
|
| 68 |
+
return next, next != previousTarget
|
| 69 |
+
}
|
| 70 |
+
return previousTarget, false
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
// min returns the smaller of two ints.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
| 80 |
+
|
| 81 |
+
// max returns the larger of two ints.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_size_advisor_test.go
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"testing"
|
| 16 |
+
"time"
|
| 17 |
+
|
| 18 |
+
"github.com/stretchr/testify/assert"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
// Megabyte is the number of bytes in one mebibyte (1024 * 1024).
const Megabyte = 1024 * 1024

// TestMemtableSizeAdvisor_Initial checks Initial with only the initial size
// configured. Note: such a config leaves the advisor inactive, and the static
// default (10 MiB) coincides with the configured value, so both code paths
// yield 10485760.
func TestMemtableSizeAdvisor_Initial(t *testing.T) {
	a := newMemtableSizeAdvisor(memtableSizeAdvisorCfg{
		initial: 10 * Megabyte,
	})

	assert.Equal(t, 10485760, a.Initial())
}
|
| 30 |
+
|
| 31 |
+
func TestMemtableSizeAdvisor_NextTarget(t *testing.T) {
|
| 32 |
+
a := newMemtableSizeAdvisor(memtableSizeAdvisorCfg{
|
| 33 |
+
initial: 10 * Megabyte,
|
| 34 |
+
minDuration: 10 * time.Second,
|
| 35 |
+
maxDuration: 30 * time.Second,
|
| 36 |
+
stepSize: 10 * Megabyte,
|
| 37 |
+
maxSize: 100 * Megabyte,
|
| 38 |
+
})
|
| 39 |
+
|
| 40 |
+
type test struct {
|
| 41 |
+
name string
|
| 42 |
+
current int
|
| 43 |
+
lastCycle time.Duration
|
| 44 |
+
expectedChanged bool
|
| 45 |
+
expectedTarget int
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
tests := []test{
|
| 49 |
+
{
|
| 50 |
+
name: "completely within range",
|
| 51 |
+
current: 10 * Megabyte,
|
| 52 |
+
lastCycle: 17 * time.Second,
|
| 53 |
+
expectedChanged: false,
|
| 54 |
+
expectedTarget: 10 * Megabyte,
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
name: "cycle too short",
|
| 58 |
+
current: 10 * Megabyte,
|
| 59 |
+
lastCycle: 7 * time.Second,
|
| 60 |
+
expectedChanged: true,
|
| 61 |
+
expectedTarget: 20 * Megabyte,
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
name: "cycle too long",
|
| 65 |
+
current: 100 * Megabyte,
|
| 66 |
+
lastCycle: 47 * time.Second,
|
| 67 |
+
expectedChanged: true,
|
| 68 |
+
expectedTarget: 90 * Megabyte,
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
name: "cycle too short, but approaching limit",
|
| 72 |
+
current: 95 * Megabyte,
|
| 73 |
+
lastCycle: 7 * time.Second,
|
| 74 |
+
expectedChanged: true,
|
| 75 |
+
expectedTarget: 100 * Megabyte, // not 105 (!)
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
name: "cycle too short, but already at limit",
|
| 79 |
+
current: 100 * Megabyte,
|
| 80 |
+
lastCycle: 7 * time.Second,
|
| 81 |
+
expectedChanged: false,
|
| 82 |
+
expectedTarget: 100 * Megabyte,
|
| 83 |
+
},
|
| 84 |
+
{
|
| 85 |
+
name: "cycle too long, but barely above initial size",
|
| 86 |
+
current: 12 * Megabyte,
|
| 87 |
+
lastCycle: 47 * time.Second,
|
| 88 |
+
expectedChanged: true,
|
| 89 |
+
expectedTarget: 10 * Megabyte, // not 2 (1)
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
name: "cycle too long, but already at initial size",
|
| 93 |
+
current: 10 * Megabyte,
|
| 94 |
+
lastCycle: 47 * time.Second,
|
| 95 |
+
expectedChanged: false,
|
| 96 |
+
expectedTarget: 10 * Megabyte,
|
| 97 |
+
},
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
for _, test := range tests {
|
| 101 |
+
t.Run(test.name, func(t *testing.T) {
|
| 102 |
+
newTarget, changed := a.NextTarget(test.current, test.lastCycle)
|
| 103 |
+
assert.Equal(t, test.expectedTarget, newTarget, "expect new target")
|
| 104 |
+
assert.Equal(t, test.expectedChanged, changed, "expect changed")
|
| 105 |
+
})
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
target, changed := a.NextTarget(10*1024*1024, 17*time.Second)
|
| 109 |
+
assert.False(t, changed)
|
| 110 |
+
assert.Equal(t, 10*1024*1024, target)
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
// TestMemtableSizeAdvisor_MissingConfig ensures an advisor built from a
// zero-value config still answers with the static default.
func TestMemtableSizeAdvisor_MissingConfig(t *testing.T) {
	// even with an all-default value config the advisor should still return
	// reasonable results, for example many integration tests might not provide a
	// reasonable config to the advisor
	a := newMemtableSizeAdvisor(memtableSizeAdvisorCfg{})
	assert.Equal(t, 10485760, a.Initial())
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_test.go
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"path"
|
| 16 |
+
"testing"
|
| 17 |
+
|
| 18 |
+
"github.com/sirupsen/logrus/hooks/test"
|
| 19 |
+
"github.com/stretchr/testify/assert"
|
| 20 |
+
"github.com/stretchr/testify/require"
|
| 21 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
// This test prevents a regression on
// https://www.youtube.com/watch?v=OS8taasZl8k
//
// It verifies that updating a replace-strategy entry with a different
// secondary key both registers the new secondary key and invalidates the
// old one in the memtable.
func Test_MemtableSecondaryKeyBug(t *testing.T) {
	dir := t.TempDir()

	logger, _ := test.NewNullLogger()
	cl, err := newCommitLogger(dir, StrategyReplace, 0)
	require.NoError(t, err)

	// memtable with a single secondary index (arg 1); the path name documents
	// that this memtable is never flushed during the test
	m, err := newMemtable(path.Join(dir, "will-never-flush"), StrategyReplace, 1, cl, nil, logger, false, nil, false, nil)
	require.Nil(t, err)
	t.Cleanup(func() {
		require.Nil(t, m.commitlog.close())
	})

	t.Run("add initial value", func(t *testing.T) {
		err = m.put([]byte("my-key"), []byte("my-value"),
			WithSecondaryKey(0, []byte("secondary-key-initial")))
		require.Nil(t, err)
	})

	t.Run("retrieve by primary", func(t *testing.T) {
		val, err := m.get([]byte("my-key"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value"), val)
	})

	t.Run("retrieve by initial secondary", func(t *testing.T) {
		val, err := m.getBySecondary(0, []byte("secondary-key-initial"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value"), val)
	})

	// overwrite the same primary key, but attach a *different* secondary key
	t.Run("update value with different secondary key", func(t *testing.T) {
		err = m.put([]byte("my-key"), []byte("my-value-updated"),
			WithSecondaryKey(0, []byte("different-secondary-key")))
		require.Nil(t, err)
	})

	t.Run("retrieve by primary again", func(t *testing.T) {
		val, err := m.get([]byte("my-key"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value-updated"), val)
	})

	t.Run("retrieve by updated secondary", func(t *testing.T) {
		val, err := m.getBySecondary(0, []byte("different-secondary-key"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value-updated"), val)
	})

	// the actual regression check: the stale secondary key must no longer
	// resolve to the object after the update
	t.Run("retrieve by initial secondary - should not find anything", func(t *testing.T) {
		val, err := m.getBySecondary(0, []byte("secondary-key-initial"))
		assert.Equal(t, lsmkv.NotFound, err)
		assert.Nil(t, val)
	})
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/metrics.go
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"time"
|
| 16 |
+
|
| 17 |
+
"github.com/prometheus/client_golang/prometheus"
|
| 18 |
+
"github.com/weaviate/weaviate/usecases/monitoring"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
type (
	// NsObserver records a measurement derived from a start timestamp in
	// unix nanoseconds (see MemtableOpObserver).
	NsObserver func(ns int64)
	// BytesWriteObserver records the size in bytes of a single write.
	BytesWriteObserver func(bytes int64)
	// BytesReadObserver records the size in bytes of a single read plus how
	// long it took in nanoseconds. NOTE: the current ReadObserver
	// implementation ignores the duration argument.
	BytesReadObserver func(bytes int64, nanoseconds int64)
	// Setter sets an absolute gauge value.
	Setter func(val uint64)
	// TimeObserver records the elapsed time since start.
	TimeObserver func(start time.Time)
)
|
| 28 |
+
|
| 29 |
+
// Metrics bundles all Prometheus instruments used by the LSM store for one
// class/shard pair. A nil *Metrics is valid and means "metrics disabled":
// the observer factories and Track* methods all no-op on a nil receiver.
type Metrics struct {
	// compaction gauges, one per merge strategy
	CompactionReplace         *prometheus.GaugeVec
	CompactionSet             *prometheus.GaugeVec
	CompactionMap             *prometheus.GaugeVec
	CompactionRoaringSet      *prometheus.GaugeVec
	CompactionRoaringSetRange *prometheus.GaugeVec
	// segment counts and sizes
	ActiveSegments               *prometheus.GaugeVec
	ObjectsBucketSegments        *prometheus.GaugeVec
	CompressedVecsBucketSegments *prometheus.GaugeVec
	bloomFilters                 prometheus.ObserverVec
	SegmentObjects               *prometheus.GaugeVec
	SegmentSize                  *prometheus.GaugeVec
	SegmentCount                 *prometheus.GaugeVec
	SegmentUnloaded              *prometheus.GaugeVec
	// startup timing and disk throughput
	startupDurations prometheus.ObserverVec
	startupDiskIO    prometheus.ObserverVec
	objectCount      prometheus.Gauge
	// memtable operation timings (ms) and absolute size
	memtableDurations prometheus.ObserverVec
	memtableSize      *prometheus.GaugeVec
	DimensionSum      *prometheus.GaugeVec
	// file-level I/O summaries, shared across classes (not curried)
	IOWrite *prometheus.SummaryVec
	IORead  *prometheus.SummaryVec
	// lazy-segment lifecycle gauges
	LazySegmentUnLoad prometheus.Gauge
	LazySegmentLoad   prometheus.Gauge
	LazySegmentClose  prometheus.Gauge
	LazySegmentInit   prometheus.Gauge

	// groupClasses mirrors promMetrics.Group: when true, class/shard/path
	// labels are collapsed to "n/a" and absolute gauges are disabled
	groupClasses bool
	// criticalBucketsOnly mirrors promMetrics.LSMCriticalBucketsOnly; not
	// referenced in this file — presumably consulted elsewhere, verify
	criticalBucketsOnly bool
}
|
| 59 |
+
|
| 60 |
+
// NewMetrics builds the LSM-store metrics for a single class/shard pair by
// currying the shared Prometheus vectors from promMetrics with the class and
// shard labels. In grouped mode (promMetrics.Group) both labels are
// collapsed to "n/a" so all classes and shards share one time series.
func NewMetrics(promMetrics *monitoring.PrometheusMetrics, className,
	shardName string,
) *Metrics {
	if promMetrics.Group {
		className = "n/a"
		shardName = "n/a"
	}

	// one curried async-operations gauge per compaction strategy
	replace := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratreplace",
		"class_name": className,
		"shard_name": shardName,
	})

	set := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratset",
		"class_name": className,
		"shard_name": shardName,
	})

	roaringSet := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratroaringset",
		"class_name": className,
		"shard_name": shardName,
	})

	roaringSetRange := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratroaringsetrange",
		"class_name": className,
		"shard_name": shardName,
	})

	stratMap := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratmap",
		"class_name": className,
		"shard_name": shardName,
	})

	// NOTE(review): the four lazy-segment gauges below are resolved from the
	// global monitoring.GetMetrics() rather than from the promMetrics
	// parameter used by every other metric in this constructor — confirm
	// this is intentional, as it bypasses the caller-supplied registry.
	lazySegmentInit := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentInit",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})

	lazySegmentLoad := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentLoad",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})

	lazySegmentClose := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentClose",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})
	lazySegmentUnload := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentUnLoad",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})

	return &Metrics{
		groupClasses:        promMetrics.Group,
		criticalBucketsOnly: promMetrics.LSMCriticalBucketsOnly,
		CompactionReplace:   replace,
		CompactionSet:       set,
		CompactionMap:       stratMap,
		CompactionRoaringSet:      roaringSet,
		CompactionRoaringSetRange: roaringSetRange,
		ActiveSegments: promMetrics.LSMSegmentCount.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		ObjectsBucketSegments: promMetrics.LSMObjectsBucketSegmentCount.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		CompressedVecsBucketSegments: promMetrics.LSMCompressedVecsBucketSegmentCount.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		bloomFilters: promMetrics.LSMBloomFilters.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentObjects: promMetrics.LSMSegmentObjects.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentSize: promMetrics.LSMSegmentSize.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentCount: promMetrics.LSMSegmentCountByLevel.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentUnloaded: promMetrics.LSMSegmentUnloaded.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		startupDiskIO: promMetrics.StartupDiskIO.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		startupDurations: promMetrics.StartupDurations.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		objectCount: promMetrics.ObjectCount.With(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		memtableDurations: promMetrics.LSMMemtableDurations.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		memtableSize: promMetrics.LSMMemtableSize.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		DimensionSum: promMetrics.VectorDimensionsSum.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		// the file-IO summaries are intentionally not curried per class/shard
		IOWrite: promMetrics.FileIOWrites,
		IORead:  promMetrics.FileIOReads,
		LazySegmentLoad:   lazySegmentLoad,
		LazySegmentClose:  lazySegmentClose,
		LazySegmentInit:   lazySegmentInit,
		LazySegmentUnLoad: lazySegmentUnload,
	}
}
|
| 197 |
+
|
| 198 |
+
// noOpTimeObserver is the fallback TimeObserver returned when metrics are
// disabled (nil *Metrics); it discards the start time.
func noOpTimeObserver(start time.Time) {
	// do nothing
}
|
| 201 |
+
|
| 202 |
+
// noOpNsObserver is the fallback NsObserver returned when metrics are
// disabled (nil *Metrics); it discards the start timestamp.
func noOpNsObserver(startNs int64) {
	// do nothing
}
|
| 205 |
+
|
| 206 |
+
// noOpNsReadObserver is the fallback BytesReadObserver returned when metrics
// are disabled (nil *Metrics); it discards both the byte count and the
// duration.
//
// The parameters were previously named (startNs, time): `time` shadowed the
// imported time package within this scope, and both names misrepresented the
// BytesReadObserver contract (bytes, nanoseconds).
func noOpNsReadObserver(bytes int64, nanoseconds int64) {
	// do nothing
}
|
| 209 |
+
|
| 210 |
+
// noOpSetter is the fallback Setter returned when metrics are disabled or
// when absolute gauges are unavailable (grouped mode, see
// MemtableSizeSetter); it discards the value.
func noOpSetter(val uint64) {
	// do nothing
}
|
| 213 |
+
|
| 214 |
+
func (m *Metrics) MemtableOpObserver(path, strategy, op string) NsObserver {
|
| 215 |
+
if m == nil {
|
| 216 |
+
return noOpNsObserver
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
if m.groupClasses {
|
| 220 |
+
path = "n/a"
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
curried := m.memtableDurations.With(prometheus.Labels{
|
| 224 |
+
"operation": op,
|
| 225 |
+
"path": path,
|
| 226 |
+
"strategy": strategy,
|
| 227 |
+
})
|
| 228 |
+
|
| 229 |
+
return func(startNs int64) {
|
| 230 |
+
took := float64(time.Now().UnixNano()-startNs) / float64(time.Millisecond)
|
| 231 |
+
curried.Observe(took)
|
| 232 |
+
}
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
func (m *Metrics) MemtableWriteObserver(strategy, op string) BytesWriteObserver {
|
| 236 |
+
if m == nil {
|
| 237 |
+
return noOpNsObserver
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
curried := m.IOWrite.With(prometheus.Labels{
|
| 241 |
+
"operation": op,
|
| 242 |
+
"strategy": strategy,
|
| 243 |
+
})
|
| 244 |
+
|
| 245 |
+
return func(bytes int64) {
|
| 246 |
+
curried.Observe(float64(bytes))
|
| 247 |
+
}
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
func (m *Metrics) ReadObserver(op string) BytesReadObserver {
|
| 251 |
+
if m == nil {
|
| 252 |
+
return noOpNsReadObserver
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
curried := m.IORead.With(prometheus.Labels{
|
| 256 |
+
"operation": op,
|
| 257 |
+
})
|
| 258 |
+
|
| 259 |
+
return func(n int64, nanoseconds int64) { curried.Observe(float64(n)) }
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
func (m *Metrics) MemtableSizeSetter(path, strategy string) Setter {
|
| 263 |
+
if m == nil || m.groupClasses {
|
| 264 |
+
// this metric would set absolute values, that's not possible in
|
| 265 |
+
// grouped mode, each call would essentially overwrite the last
|
| 266 |
+
return noOpSetter
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
curried := m.memtableSize.With(prometheus.Labels{
|
| 270 |
+
"path": path,
|
| 271 |
+
"strategy": strategy,
|
| 272 |
+
})
|
| 273 |
+
|
| 274 |
+
return func(size uint64) {
|
| 275 |
+
curried.Set(float64(size))
|
| 276 |
+
}
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
func (m *Metrics) BloomFilterObserver(strategy, operation string) TimeObserver {
|
| 280 |
+
if m == nil {
|
| 281 |
+
return noOpTimeObserver
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
curried := m.bloomFilters.With(prometheus.Labels{
|
| 285 |
+
"strategy": strategy,
|
| 286 |
+
"operation": operation,
|
| 287 |
+
})
|
| 288 |
+
|
| 289 |
+
return func(before time.Time) {
|
| 290 |
+
curried.Observe(float64(time.Since(before)) / float64(time.Millisecond))
|
| 291 |
+
}
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
func (m *Metrics) TrackStartupReadWALDiskIO(read int64, nanoseconds int64) {
|
| 295 |
+
if m == nil {
|
| 296 |
+
return
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
seconds := float64(nanoseconds) / float64(time.Second)
|
| 300 |
+
throughput := float64(read) / float64(seconds)
|
| 301 |
+
m.startupDiskIO.With(prometheus.Labels{"operation": "lsm_recover_wal"}).Observe(throughput)
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
// TrackStartupBucket records, in milliseconds, how long a bucket took to
// start up. Safe to call on a nil receiver (no-op).
func (m *Metrics) TrackStartupBucket(start time.Time) {
	if m == nil {
		return
	}

	took := float64(time.Since(start)) / float64(time.Millisecond)
	m.startupDurations.With(prometheus.Labels{"operation": "lsm_startup_bucket"}).Observe(took)
}
|
| 312 |
+
|
| 313 |
+
// TrackStartupBucketRecovery records, in milliseconds, how long a bucket's
// recovery phase took at startup. Safe to call on a nil receiver (no-op).
func (m *Metrics) TrackStartupBucketRecovery(start time.Time) {
	if m == nil {
		return
	}

	took := float64(time.Since(start)) / float64(time.Millisecond)
	m.startupDurations.With(prometheus.Labels{"operation": "lsm_startup_bucket_recovery"}).Observe(took)
}
|
| 321 |
+
|
| 322 |
+
// ObjectCount sets the absolute object-count gauge for this class/shard.
// Safe to call on a nil receiver (no-op).
func (m *Metrics) ObjectCount(count int) {
	if m == nil {
		return
	}

	m.objectCount.Set(float64(count))
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mmap_vs_read_test.go
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"crypto/rand"
|
| 16 |
+
"io"
|
| 17 |
+
"os"
|
| 18 |
+
"strconv"
|
| 19 |
+
"testing"
|
| 20 |
+
|
| 21 |
+
"github.com/stretchr/testify/require"
|
| 22 |
+
"github.com/weaviate/weaviate/usecases/mmap"
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
func BenchmarkMMap(b *testing.B) {
|
| 26 |
+
tests := []struct {
|
| 27 |
+
size int
|
| 28 |
+
}{
|
| 29 |
+
{size: 100},
|
| 30 |
+
{size: 1000},
|
| 31 |
+
{size: 10000},
|
| 32 |
+
{size: 100000},
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
dir := b.TempDir()
|
| 36 |
+
f, err := os.Create(dir + "/test.tmp")
|
| 37 |
+
require.NoError(b, err)
|
| 38 |
+
|
| 39 |
+
bytes := make([]byte, 100000)
|
| 40 |
+
read, err := rand.Read(bytes)
|
| 41 |
+
require.NoError(b, err)
|
| 42 |
+
require.Equal(b, read, len(bytes))
|
| 43 |
+
|
| 44 |
+
written, err := f.Write(bytes)
|
| 45 |
+
require.NoError(b, err)
|
| 46 |
+
require.Equal(b, written, len(bytes))
|
| 47 |
+
|
| 48 |
+
b.ResetTimer()
|
| 49 |
+
|
| 50 |
+
for _, test := range tests {
|
| 51 |
+
sum := 0
|
| 52 |
+
for i := range bytes[:test.size] {
|
| 53 |
+
sum += int(bytes[i])
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
b.Run(strconv.Itoa(test.size)+"mmap", func(b *testing.B) {
|
| 57 |
+
b.ReportAllocs()
|
| 58 |
+
|
| 59 |
+
for i := 0; i < b.N; i++ {
|
| 60 |
+
// not needed here, but we need to do it to have the same overhead in both tests
|
| 61 |
+
_, err := f.Seek(0, io.SeekStart)
|
| 62 |
+
require.NoError(b, err)
|
| 63 |
+
|
| 64 |
+
contents, err := mmap.MapRegion(f, int(test.size), mmap.RDONLY, 0, 0)
|
| 65 |
+
require.NoError(b, err)
|
| 66 |
+
|
| 67 |
+
innerSum := 0
|
| 68 |
+
for j := range contents {
|
| 69 |
+
innerSum += int(contents[j])
|
| 70 |
+
}
|
| 71 |
+
require.Equal(b, sum, innerSum)
|
| 72 |
+
require.NoError(b, contents.Unmap())
|
| 73 |
+
}
|
| 74 |
+
})
|
| 75 |
+
|
| 76 |
+
b.Run(strconv.Itoa(test.size)+"full read", func(b *testing.B) {
|
| 77 |
+
b.ReportAllocs()
|
| 78 |
+
|
| 79 |
+
for i := 0; i < b.N; i++ {
|
| 80 |
+
_, err := f.Seek(0, io.SeekStart)
|
| 81 |
+
require.NoError(b, err)
|
| 82 |
+
|
| 83 |
+
data := make([]byte, test.size)
|
| 84 |
+
n, err := f.Read(data)
|
| 85 |
+
if err != nil {
|
| 86 |
+
return
|
| 87 |
+
}
|
| 88 |
+
require.NoError(b, err)
|
| 89 |
+
require.Equal(b, n, test.size)
|
| 90 |
+
|
| 91 |
+
innerSum := 0
|
| 92 |
+
for j := range data {
|
| 93 |
+
innerSum += int(data[j])
|
| 94 |
+
}
|
| 95 |
+
require.Equal(b, sum, innerSum)
|
| 96 |
+
}
|
| 97 |
+
})
|
| 98 |
+
}
|
| 99 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mock_bucket_creator.go
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
// Code generated by mockery v2.53.2. DO NOT EDIT.
|
| 13 |
+
|
| 14 |
+
package lsmkv
|
| 15 |
+
|
| 16 |
+
import (
|
| 17 |
+
context "context"
|
| 18 |
+
|
| 19 |
+
logrus "github.com/sirupsen/logrus"
|
| 20 |
+
cyclemanager "github.com/weaviate/weaviate/entities/cyclemanager"
|
| 21 |
+
|
| 22 |
+
mock "github.com/stretchr/testify/mock"
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
// MockBucketCreator is an autogenerated mock type for the BucketCreator type
|
| 26 |
+
type MockBucketCreator struct {
|
| 27 |
+
mock.Mock
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
type MockBucketCreator_Expecter struct {
|
| 31 |
+
mock *mock.Mock
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
func (_m *MockBucketCreator) EXPECT() *MockBucketCreator_Expecter {
|
| 35 |
+
return &MockBucketCreator_Expecter{mock: &_m.Mock}
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
// NewBucket provides a mock function with given fields: ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts
|
| 39 |
+
func (_m *MockBucketCreator) NewBucket(ctx context.Context, dir string, rootDir string, logger logrus.FieldLogger, metrics *Metrics, compactionCallbacks cyclemanager.CycleCallbackGroup, flushCallbacks cyclemanager.CycleCallbackGroup, opts ...BucketOption) (*Bucket, error) {
|
| 40 |
+
_va := make([]interface{}, len(opts))
|
| 41 |
+
for _i := range opts {
|
| 42 |
+
_va[_i] = opts[_i]
|
| 43 |
+
}
|
| 44 |
+
var _ca []interface{}
|
| 45 |
+
_ca = append(_ca, ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks)
|
| 46 |
+
_ca = append(_ca, _va...)
|
| 47 |
+
ret := _m.Called(_ca...)
|
| 48 |
+
|
| 49 |
+
if len(ret) == 0 {
|
| 50 |
+
panic("no return value specified for NewBucket")
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
var r0 *Bucket
|
| 54 |
+
var r1 error
|
| 55 |
+
if rf, ok := ret.Get(0).(func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) (*Bucket, error)); ok {
|
| 56 |
+
return rf(ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts...)
|
| 57 |
+
}
|
| 58 |
+
if rf, ok := ret.Get(0).(func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) *Bucket); ok {
|
| 59 |
+
r0 = rf(ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts...)
|
| 60 |
+
} else {
|
| 61 |
+
if ret.Get(0) != nil {
|
| 62 |
+
r0 = ret.Get(0).(*Bucket)
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
if rf, ok := ret.Get(1).(func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) error); ok {
|
| 67 |
+
r1 = rf(ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts...)
|
| 68 |
+
} else {
|
| 69 |
+
r1 = ret.Error(1)
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
return r0, r1
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
// MockBucketCreator_NewBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBucket'
|
| 76 |
+
type MockBucketCreator_NewBucket_Call struct {
|
| 77 |
+
*mock.Call
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// NewBucket is a helper method to define mock.On call
|
| 81 |
+
// - ctx context.Context
|
| 82 |
+
// - dir string
|
| 83 |
+
// - rootDir string
|
| 84 |
+
// - logger logrus.FieldLogger
|
| 85 |
+
// - metrics *Metrics
|
| 86 |
+
// - compactionCallbacks cyclemanager.CycleCallbackGroup
|
| 87 |
+
// - flushCallbacks cyclemanager.CycleCallbackGroup
|
| 88 |
+
// - opts ...BucketOption
|
| 89 |
+
func (_e *MockBucketCreator_Expecter) NewBucket(ctx interface{}, dir interface{}, rootDir interface{}, logger interface{}, metrics interface{}, compactionCallbacks interface{}, flushCallbacks interface{}, opts ...interface{}) *MockBucketCreator_NewBucket_Call {
|
| 90 |
+
return &MockBucketCreator_NewBucket_Call{Call: _e.mock.On("NewBucket",
|
| 91 |
+
append([]interface{}{ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks}, opts...)...)}
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
func (_c *MockBucketCreator_NewBucket_Call) Run(run func(ctx context.Context, dir string, rootDir string, logger logrus.FieldLogger, metrics *Metrics, compactionCallbacks cyclemanager.CycleCallbackGroup, flushCallbacks cyclemanager.CycleCallbackGroup, opts ...BucketOption)) *MockBucketCreator_NewBucket_Call {
|
| 95 |
+
_c.Call.Run(func(args mock.Arguments) {
|
| 96 |
+
variadicArgs := make([]BucketOption, len(args)-7)
|
| 97 |
+
for i, a := range args[7:] {
|
| 98 |
+
if a != nil {
|
| 99 |
+
variadicArgs[i] = a.(BucketOption)
|
| 100 |
+
}
|
| 101 |
+
}
|
| 102 |
+
run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(logrus.FieldLogger), args[4].(*Metrics), args[5].(cyclemanager.CycleCallbackGroup), args[6].(cyclemanager.CycleCallbackGroup), variadicArgs...)
|
| 103 |
+
})
|
| 104 |
+
return _c
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
func (_c *MockBucketCreator_NewBucket_Call) Return(_a0 *Bucket, _a1 error) *MockBucketCreator_NewBucket_Call {
|
| 108 |
+
_c.Call.Return(_a0, _a1)
|
| 109 |
+
return _c
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
func (_c *MockBucketCreator_NewBucket_Call) RunAndReturn(run func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) (*Bucket, error)) *MockBucketCreator_NewBucket_Call {
|
| 113 |
+
_c.Call.Return(run)
|
| 114 |
+
return _c
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
// NewMockBucketCreator creates a new instance of MockBucketCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
| 118 |
+
// The first argument is typically a *testing.T value.
|
| 119 |
+
func NewMockBucketCreator(t interface {
|
| 120 |
+
mock.TestingT
|
| 121 |
+
Cleanup(func())
|
| 122 |
+
}) *MockBucketCreator {
|
| 123 |
+
mock := &MockBucketCreator{}
|
| 124 |
+
mock.Mock.Test(t)
|
| 125 |
+
|
| 126 |
+
t.Cleanup(func() { mock.AssertExpectations(t) })
|
| 127 |
+
|
| 128 |
+
return mock
|
| 129 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys.go
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"bytes"
|
| 16 |
+
"math"
|
| 17 |
+
"sort"
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
// QuantileKeys returns an approximation of the keys that make up the specified
// quantiles. This can be used to start parallel cursors at fairly evenly
// distributed positions in the segment.
//
// To understand the approximation, checkout
// [lsmkv.segmentindex.DiskTree.QuantileKeys] that runs on each segment.
//
// Some things to keep in mind:
//
//  1. It may return fewer keys than requested (including 0) if the segment
//     contains fewer entries
//  2. It may return keys that do not exist, for example because they are
//     tombstoned. This is acceptable, as a key does not have to exist to be used
//     as part of .Seek() in a cursor.
//  3. It will never return duplicates, to make sure all parallel cursors
//     return unique values.
func (b *Bucket) QuantileKeys(q int) [][]byte {
	if q <= 0 {
		return nil
	}

	// hold the flush lock for the duration of the sampling so the set of
	// disk segments cannot change underneath us
	b.flushLock.RLock()
	defer b.flushLock.RUnlock()

	// note: only on-disk segments are sampled; the active/flushing memtables
	// do not contribute keys here
	keys := b.disk.quantileKeys(q)
	return keys
}
|
| 47 |
+
|
| 48 |
+
func (sg *SegmentGroup) quantileKeys(q int) [][]byte {
|
| 49 |
+
segments, release := sg.getAndLockSegments()
|
| 50 |
+
defer release()
|
| 51 |
+
|
| 52 |
+
var keys [][]byte
|
| 53 |
+
|
| 54 |
+
if len(segments) == 0 {
|
| 55 |
+
return keys
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
for _, s := range segments {
|
| 59 |
+
keys = append(keys, s.quantileKeys(q)...)
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
// re-sort keys
|
| 63 |
+
sort.Slice(keys, func(i, j int) bool {
|
| 64 |
+
return bytes.Compare(keys[i], keys[j]) < 0
|
| 65 |
+
})
|
| 66 |
+
|
| 67 |
+
// There could be duplicates if a key was modified in multiple segments, we
|
| 68 |
+
// need to remove them. Since the list is sorted at this, this is fairly easy
|
| 69 |
+
// to do:
|
| 70 |
+
uniqueKeys := make([][]byte, 0, len(keys))
|
| 71 |
+
for i := range keys {
|
| 72 |
+
if i == 0 || !bytes.Equal(keys[i], keys[i-1]) {
|
| 73 |
+
uniqueKeys = append(uniqueKeys, keys[i])
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
return pickEvenlyDistributedKeys(uniqueKeys, q)
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// quantileKeys delegates to the segment's on-disk index to approximate q
// quantile keys within this segment.
func (s *segment) quantileKeys(q int) [][]byte {
	return s.index.QuantileKeys(q)
}
|
| 83 |
+
|
| 84 |
+
// pickEvenlyDistributedKeys picks q keys from the input keys, trying to keep
// the distribution as even as possible. The input keys are assumed to be
// sorted. It never returns duplicates, see the unit test proving this.
//
// Important to keep in mind is that our input values do not contain the first
// and last elements, but rather the first quantile points.
// This is because they were obtained using
// [lsmkv.segmentindex.DiskTree.QuantileKeys] which traverses the binary tree
// to a certain depth. The first element in the list is the element you get
// from continuously following the left child until you hit the maximum
// traversal depth. Respectively, the last element is the element you get from
// continuously following the right child until you hit the maximum traversal
// depth.
// This means that when a cursor uses those keys, it will need to add two
// special cases:
//
//  1. It needs to start with the actual first element and read to the first
//     checkpoint
//  2. When reaching the last checkpoint, it needs to keep reading
//     until the cursor no longer returns elements.
//
// As a result our goal here is to keep the gaps as even as possible. For
// example, assume the keys ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
// and we want to pick 3 keys. We would return ["C", "F", "I"], thus keeping
// the spacing fairly even.
func pickEvenlyDistributedKeys(uniqueKeys [][]byte, q int) [][]byte {
	if q >= len(uniqueKeys) {
		// impossible to pick q distinct keys, simply return the input
		return uniqueKeys
	}

	// we now have the guarantee that q < len(uniqueKeys) (the original
	// comment stated the inequality backwards), which means it is possible
	// to pick q keys without overlap while keeping the distribution as even
	// as possible: each pick sits at the center of its 1/q-wide bucket of
	// the input.
	finalKeys := make([][]byte, q)
	stepSize := float64(len(uniqueKeys)) / float64(q)
	for i := range finalKeys {
		// center of bucket i; since stepSize > 1 here, consecutive rounded
		// positions never collide and pos stays within bounds
		pos := int(math.Round(float64(i)*stepSize + 0.5*stepSize))

		finalKeys[i] = uniqueKeys[pos]
	}

	return finalKeys
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys_test.go
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"context"
|
| 16 |
+
"encoding/binary"
|
| 17 |
+
"fmt"
|
| 18 |
+
"testing"
|
| 19 |
+
|
| 20 |
+
"github.com/sirupsen/logrus/hooks/test"
|
| 21 |
+
"github.com/stretchr/testify/assert"
|
| 22 |
+
"github.com/stretchr/testify/require"
|
| 23 |
+
"github.com/weaviate/weaviate/entities/cyclemanager"
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
// TestQuantileKeysSingleSegment flushes 1000 consecutive big-endian keys into
// a single segment and checks that QuantileKeys returns strictly increasing,
// roughly evenly spaced checkpoint keys.
func TestQuantileKeysSingleSegment(t *testing.T) {
	dir := t.TempDir()
	ctx := context.Background()
	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(
		ctx, dir, "", logger, nil, cyclemanager.NewCallbackGroupNoop(),
		cyclemanager.NewCallbackGroupNoop())
	require.Nil(t, err)

	importConsecutiveKeys(t, b, 0, 1000)

	// all cycle managers are noops, so we need to explicitly flush if we want a
	// segment to be built
	require.Nil(t, b.FlushAndSwitch())

	quantiles := b.QuantileKeys(10)

	// keys were written as big-endian uint64s, so decoding restores the
	// original numeric order
	asNumbers := make([]uint64, len(quantiles))
	for i, q := range quantiles {
		asNumbers[i] = binary.BigEndian.Uint64(q)
	}

	// validate there are no duplicates, and each key is strictly greater than
	// the last
	for i, n := range asNumbers {
		if i == 0 {
			continue
		}

		prev := asNumbers[i-1]
		assert.Greater(t, n, prev)
	}

	// assert on distribution: with evenly spread checkpoints over 1000 keys,
	// checkpoint i should sit near (i+1)*idealStepSize, so the per-position
	// ratio must stay within 10% of the ideal spacing
	idealStepSize := float64(1000) / float64(len(asNumbers)+1)
	for i, n := range asNumbers {
		actualStepSize := float64(n) / float64(i+1)
		assert.InEpsilon(t, idealStepSize, actualStepSize, 0.1)
	}
}
|
| 67 |
+
|
| 68 |
+
// TestQuantileKeysMultipleSegmentsUniqueEntries builds two segments with
// disjoint key ranges (0-999 and 1000-1999) and verifies that the combined
// quantile keys are still strictly increasing, i.e. merging candidates from
// multiple segments produces no duplicates.
func TestQuantileKeysMultipleSegmentsUniqueEntries(t *testing.T) {
	dir := t.TempDir()
	ctx := context.Background()
	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(
		ctx, dir, "", logger, nil, cyclemanager.NewCallbackGroupNoop(),
		cyclemanager.NewCallbackGroupNoop())
	require.Nil(t, err)

	importConsecutiveKeys(t, b, 0, 1000)

	// all cycle managers are noops, so we need to explicitly flush if we want a
	// segment to be built
	require.Nil(t, b.FlushAndSwitch())

	importConsecutiveKeys(t, b, 1000, 2000)

	// all cycle managers are noops, so we need to explicitly flush if we want a
	// segment to be built
	require.Nil(t, b.FlushAndSwitch())

	quantiles := b.QuantileKeys(10)

	// keys were written as big-endian uint64s, so decoding restores the
	// original numeric order
	asNumbers := make([]uint64, len(quantiles))
	for i, q := range quantiles {
		asNumbers[i] = binary.BigEndian.Uint64(q)
	}

	// validate there are no duplicates, and each key is strictly greater than
	// the last
	for i, n := range asNumbers {
		if i == 0 {
			continue
		}

		prev := asNumbers[i-1]
		assert.Greater(t, n, prev)
	}
}
|
| 108 |
+
|
| 109 |
+
func importConsecutiveKeys(t *testing.T, b *Bucket, start, end uint64) {
|
| 110 |
+
for i := start; i < end; i++ {
|
| 111 |
+
key := make([]byte, 8)
|
| 112 |
+
binary.BigEndian.PutUint64(key, i)
|
| 113 |
+
err := b.Put(key, key)
|
| 114 |
+
require.Nil(t, err)
|
| 115 |
+
}
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
func TestKeyDistributionExample(t *testing.T) {
|
| 119 |
+
inputKeyStrings := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J"}
|
| 120 |
+
inputKeys := make([][]byte, len(inputKeyStrings))
|
| 121 |
+
for i, s := range inputKeyStrings {
|
| 122 |
+
inputKeys[i] = []byte(s)
|
| 123 |
+
}
|
| 124 |
+
q := 3
|
| 125 |
+
|
| 126 |
+
picked := pickEvenlyDistributedKeys(inputKeys, q)
|
| 127 |
+
expectKeyStrings := []string{"C", "F", "I"}
|
| 128 |
+
expectKeys := make([][]byte, len(expectKeyStrings))
|
| 129 |
+
for i, s := range expectKeyStrings {
|
| 130 |
+
expectKeys[i] = []byte(s)
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
assert.Equal(t, expectKeys, picked)
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
func TestPickEvenlyDistributedKeys(t *testing.T) {
|
| 137 |
+
for input := 0; input < 100; input++ {
|
| 138 |
+
for q := 1; q < 100; q++ {
|
| 139 |
+
t.Run(fmt.Sprintf("input=%d, q=%d", input, q), func(t *testing.T) {
|
| 140 |
+
keys := make([][]byte, input)
|
| 141 |
+
for i := 0; i < input; i++ {
|
| 142 |
+
key := make([]byte, 8)
|
| 143 |
+
binary.BigEndian.PutUint64(key, uint64(i))
|
| 144 |
+
keys[i] = key
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
picked := pickEvenlyDistributedKeys(keys, q)
|
| 148 |
+
|
| 149 |
+
// make sure there are never more results than q
|
| 150 |
+
require.LessOrEqual(t, len(picked), q)
|
| 151 |
+
|
| 152 |
+
// make sure that we get q results if there are at least q keys
|
| 153 |
+
if input >= q {
|
| 154 |
+
require.Equal(t, q, len(picked))
|
| 155 |
+
} else {
|
| 156 |
+
// if there are fewer keys than q, we should get all of them
|
| 157 |
+
require.Equal(t, input, len(picked))
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
// make sure there are no duplicates
|
| 161 |
+
for i, key := range picked {
|
| 162 |
+
if i == 0 {
|
| 163 |
+
continue
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
prev := binary.BigEndian.Uint64(picked[i-1])
|
| 167 |
+
curr := binary.BigEndian.Uint64(key)
|
| 168 |
+
|
| 169 |
+
require.Greater(t, curr, prev, "found duplicate picks")
|
| 170 |
+
}
|
| 171 |
+
})
|
| 172 |
+
}
|
| 173 |
+
}
|
| 174 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/rbtree/rbtree.go
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package rbtree
|
| 13 |
+
|
| 14 |
+
// Node is the minimal interface a red-black tree node must implement so the
// shared Rebalance logic below can operate on any concrete node type.
// Implementations appear to represent "no node" via a sentinel for which
// IsNil reports true (Rebalance and the rotations call IsNil rather than
// comparing against nil) — confirm against the concrete node types.
type Node interface {
	Parent() Node
	SetParent(Node)
	Left() Node
	SetLeft(Node)
	Right() Node
	SetRight(Node)
	IsRed() bool
	SetRed(bool)
	IsNil() bool
}
|
| 25 |
+
|
| 26 |
+
// Rebalance rebalances and recolours trees to be valid RB trees. It needs to
// be called after each node that was added to the tree. It returns the new
// root of the tree if a rotation changed it, or nil if the root is unchanged.
//
// Deletions are currently not supported as this is done through the tombstone
// flag and from the POV of the RB-tree tombstone-nodes are just normal nodes
// that get rebalanced the normal way.
//
// Throughout this file the following relationships between nodes are used:
// GP = grandparent, P = parent, U = uncle, S = sibling, N = node that was just added
//
//	  GP
//	 /  \
//	U    P
//	    / \
//	   S   N
func Rebalance(node Node) Node {
	for {
		parent := node.Parent()

		// if parent is black or the current node is the root node (== parent is nil) there is nothing to do
		if !parent.IsRed() {
			return nil
		}

		// parent is red, so it cannot be the root and a grandparent must exist
		grandparent := node.Parent().Parent()
		var uncle Node
		if parent == grandparent.Right() {
			uncle = grandparent.Left()
		} else {
			uncle = grandparent.Right()
		}

		if uncle.IsRed() {
			// if uncle is red, recoloring the tree up to the grandparent results in a valid RBtree.
			// The color of the grandfather changes to red, so there might be more fixes needed. Therefore
			// go up the tree and repeat.
			recolourNodes(parent, grandparent, uncle)
			node = grandparent
		} else {
			// if uncle is black, there are four possible cases:
			// parent is the right child grandparent:
			// 1) node is right child of parent => left rotate around GP
			// 2) node is left child of parent => right rotate around parent results in case 1
			// For cases 3 and 4 just replace left and right in the two cases above
			//
			// In all of these cases the grandfather stays black and there is no need for further fixes up the tree
			var newRoot Node
			if parent == grandparent.Right() {
				if node == parent.Left() {
					rightRotate(parent)
					// node and parent switch places in the tree, update parent to recolour the current node
					parent = node
				}
				newRoot = leftRotate(grandparent)
			} else { // parent == grandparent.left
				if node == parent.Right() {
					leftRotate(parent)
					// node and parent switch places in the tree, update parent to recolour the current node
					parent = node
				}
				newRoot = rightRotate(grandparent)
			}
			// swap the colours of the old grandparent and the new subtree root
			recolourNodes(grandparent, parent)
			return newRoot
		}
	}
}
|
| 91 |
+
|
| 92 |
+
func recolourNodes(nodes ...Node) {
|
| 93 |
+
for _, n := range nodes {
|
| 94 |
+
if !n.IsNil() {
|
| 95 |
+
if n.IsRed() {
|
| 96 |
+
n.SetRed(false)
|
| 97 |
+
} else {
|
| 98 |
+
n.SetRed(true)
|
| 99 |
+
}
|
| 100 |
+
}
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
// Rotate the tree left around the given node.
//
// After this rotation, the former right child (FC) will be the new parent and
// the former parent (FP) will be the left node of the new parent. The left
// child of the former child is transferred to the former parent.
//
//	   FP                        FC
//	  /  \      left rotate     /  \
//	FP_R  FC        =>         FP   FC_R
//	     /  \                 /  \
//	  FC_L   FC_R          FP_R   FC_L
//
// In case FP was the root of the tree, FC will be the new root of the tree
// (and is returned); otherwise nil is returned.
func leftRotate(rotationNode Node) Node {
	formerChild := rotationNode.Right()
	// the root is detected via the IsNil sentinel, not a literal nil parent
	rootRotate := rotationNode.Parent().IsNil()

	// former child node becomes new parent unless the rotation is around the root node
	if rootRotate {
		formerChild.SetParent(nil)
	} else {
		// re-attach formerChild on the same side of the old parent's parent
		if rotationNode.Parent().Left() == rotationNode {
			rotationNode.Parent().SetLeft(formerChild)
		} else {
			rotationNode.Parent().SetRight(formerChild)
		}
		formerChild.SetParent(rotationNode.Parent())
	}

	rotationNode.SetParent(formerChild)

	// Switch left child from former_child to rotation node
	// NOTE(review): this checks `!= nil` while the root check above uses
	// IsNil() — if children can be non-nil sentinel nodes this sets the
	// sentinel's parent; confirm the concrete node type's conventions.
	rotationNode.SetRight(formerChild.Left())
	if formerChild.Left() != nil {
		formerChild.Left().SetParent(rotationNode)
	}
	formerChild.SetLeft(rotationNode)

	if rootRotate {
		return formerChild
	} else {
		return nil
	}
}
|
| 147 |
+
|
| 148 |
+
// Same as leftRotate, just switch left and right everywhere: the former left
// child becomes the new parent and its right subtree is transferred to the
// rotation node. Returns the new tree root when rotating around the root,
// nil otherwise.
func rightRotate(rotationNode Node) Node {
	formerChild := rotationNode.Left()
	// the root is detected via the IsNil sentinel, not a literal nil parent
	rootRotate := rotationNode.Parent().IsNil()

	if rootRotate {
		formerChild.SetParent(nil)
	} else {
		// re-attach formerChild on the same side of the old parent's parent
		if rotationNode.Parent().Left() == rotationNode {
			rotationNode.Parent().SetLeft(formerChild)
		} else {
			rotationNode.Parent().SetRight(formerChild)
		}
		formerChild.SetParent(rotationNode.Parent())
	}
	rotationNode.SetParent(formerChild)

	// transfer formerChild's right subtree to the rotation node
	// NOTE(review): `!= nil` here vs IsNil() above — see leftRotate.
	rotationNode.SetLeft(formerChild.Right())
	if formerChild.Right() != nil {
		formerChild.Right().SetParent(rotationNode)
	}
	formerChild.SetRight(rotationNode)

	if rootRotate {
		return formerChild
	} else {
		return nil
	}
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/recover_from_wal_integration_test.go
ADDED
|
@@ -0,0 +1,973 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
//go:build integrationTest
|
| 13 |
+
// +build integrationTest
|
| 14 |
+
|
| 15 |
+
package lsmkv
|
| 16 |
+
|
| 17 |
+
import (
|
| 18 |
+
"bytes"
|
| 19 |
+
"context"
|
| 20 |
+
"fmt"
|
| 21 |
+
"io"
|
| 22 |
+
"os"
|
| 23 |
+
"os/exec"
|
| 24 |
+
"path/filepath"
|
| 25 |
+
"testing"
|
| 26 |
+
|
| 27 |
+
"github.com/stretchr/testify/assert"
|
| 28 |
+
"github.com/stretchr/testify/require"
|
| 29 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 30 |
+
"github.com/weaviate/weaviate/entities/cyclemanager"
|
| 31 |
+
"github.com/weaviate/weaviate/entities/filters"
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
func TestReplaceStrategy_RecoverFromWAL(t *testing.T) {
|
| 35 |
+
dirNameOriginal := t.TempDir()
|
| 36 |
+
dirNameRecovered := t.TempDir()
|
| 37 |
+
|
| 38 |
+
t.Run("with some previous state", func(t *testing.T) {
|
| 39 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 40 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 41 |
+
WithStrategy(StrategyReplace), WithMinWalThreshold(0))
|
| 42 |
+
require.Nil(t, err)
|
| 43 |
+
|
| 44 |
+
// so big it effectively never triggers as part of this test
|
| 45 |
+
b.SetMemtableThreshold(1e9)
|
| 46 |
+
|
| 47 |
+
t.Run("set one key that will be flushed orderly", func(t *testing.T) {
|
| 48 |
+
// the motivation behind flushing this initial segment is to check that
|
| 49 |
+
// deletion as part of the recovery also works correctly. If we would
|
| 50 |
+
// just delete something that was created as part of the same memtable,
|
| 51 |
+
// the tests would still pass, even with removing the logic that recovers
|
| 52 |
+
// tombstones.
|
| 53 |
+
//
|
| 54 |
+
// To make sure they fail in this case, this prior state was introduced.
|
| 55 |
+
// An entry with key "key-2" is introduced in a previous segment, so if
|
| 56 |
+
// the deletion fails as part of the recovery this key would still be
|
| 57 |
+
// present later on. With the deletion working correctly it will be gone.
|
| 58 |
+
//
|
| 59 |
+
// You can test this by commenting the "p.memtable.setTombstone()" line
|
| 60 |
+
// in p.doReplace(). This will fail the tests suite, but prior to this
|
| 61 |
+
// addition it would have passed.
|
| 62 |
+
key2 := []byte("key-2")
|
| 63 |
+
orig2 := []byte("delete me later - you should never find me again")
|
| 64 |
+
|
| 65 |
+
err = b.Put(key2, orig2)
|
| 66 |
+
require.Nil(t, err)
|
| 67 |
+
})
|
| 68 |
+
|
| 69 |
+
t.Run("shutdown (orderly) bucket to create first segment", func(t *testing.T) {
|
| 70 |
+
b.Shutdown(context.Background())
|
| 71 |
+
|
| 72 |
+
// then recreate bucket
|
| 73 |
+
var err error
|
| 74 |
+
b, err = NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 75 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 76 |
+
WithStrategy(StrategyReplace), WithMinWalThreshold(0))
|
| 77 |
+
require.Nil(t, err)
|
| 78 |
+
})
|
| 79 |
+
|
| 80 |
+
t.Run("set original values", func(t *testing.T) {
|
| 81 |
+
key1 := []byte("key-1")
|
| 82 |
+
key2 := []byte("key-2")
|
| 83 |
+
key3 := []byte("key-3")
|
| 84 |
+
orig1 := []byte("original value for key1")
|
| 85 |
+
orig2 := []byte("original value for key2")
|
| 86 |
+
orig3 := []byte("original value for key3")
|
| 87 |
+
|
| 88 |
+
err = b.Put(key1, orig1)
|
| 89 |
+
require.Nil(t, err)
|
| 90 |
+
err = b.Put(key2, orig2)
|
| 91 |
+
require.Nil(t, err)
|
| 92 |
+
err = b.Put(key3, orig3)
|
| 93 |
+
require.Nil(t, err)
|
| 94 |
+
})
|
| 95 |
+
|
| 96 |
+
t.Run("delete one, update one", func(t *testing.T) {
|
| 97 |
+
key2 := []byte("key-2")
|
| 98 |
+
key3 := []byte("key-3")
|
| 99 |
+
updated3 := []byte("updated value for key 3")
|
| 100 |
+
|
| 101 |
+
err = b.Delete(key2)
|
| 102 |
+
require.Nil(t, err)
|
| 103 |
+
|
| 104 |
+
err = b.Put(key3, updated3)
|
| 105 |
+
require.Nil(t, err)
|
| 106 |
+
})
|
| 107 |
+
|
| 108 |
+
t.Run("verify control", func(t *testing.T) {
|
| 109 |
+
key1 := []byte("key-1")
|
| 110 |
+
key2 := []byte("key-2")
|
| 111 |
+
key3 := []byte("key-3")
|
| 112 |
+
orig1 := []byte("original value for key1")
|
| 113 |
+
updated3 := []byte("updated value for key 3")
|
| 114 |
+
res, err := b.Get(key1)
|
| 115 |
+
require.Nil(t, err)
|
| 116 |
+
assert.Equal(t, res, orig1)
|
| 117 |
+
res, err = b.Get(key2)
|
| 118 |
+
require.Nil(t, err)
|
| 119 |
+
assert.Nil(t, res)
|
| 120 |
+
res, err = b.Get(key3)
|
| 121 |
+
require.Nil(t, err)
|
| 122 |
+
assert.Equal(t, res, updated3)
|
| 123 |
+
})
|
| 124 |
+
|
| 125 |
+
t.Run("make sure the WAL is flushed", func(t *testing.T) {
|
| 126 |
+
require.Nil(t, b.WriteWAL())
|
| 127 |
+
})
|
| 128 |
+
|
| 129 |
+
t.Run("copy state into recovery folder and destroy original", func(t *testing.T) {
|
| 130 |
+
t.Run("copy over wals", func(t *testing.T) {
|
| 131 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s",
|
| 132 |
+
dirNameOriginal, dirNameRecovered))
|
| 133 |
+
var out bytes.Buffer
|
| 134 |
+
cmd.Stderr = &out
|
| 135 |
+
err := cmd.Run()
|
| 136 |
+
if err != nil {
|
| 137 |
+
fmt.Println(out.String())
|
| 138 |
+
t.Fatal(err)
|
| 139 |
+
}
|
| 140 |
+
})
|
| 141 |
+
|
| 142 |
+
t.Run("copy over segments", func(t *testing.T) {
|
| 143 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.db %s",
|
| 144 |
+
dirNameOriginal, dirNameRecovered))
|
| 145 |
+
var out bytes.Buffer
|
| 146 |
+
cmd.Stderr = &out
|
| 147 |
+
err := cmd.Run()
|
| 148 |
+
if err != nil {
|
| 149 |
+
fmt.Println(out.String())
|
| 150 |
+
t.Fatal(err)
|
| 151 |
+
}
|
| 152 |
+
})
|
| 153 |
+
b = nil
|
| 154 |
+
require.Nil(t, os.RemoveAll(dirNameOriginal))
|
| 155 |
+
})
|
| 156 |
+
|
| 157 |
+
var bRec *Bucket
|
| 158 |
+
|
| 159 |
+
t.Run("create new bucket from existing state", func(t *testing.T) {
|
| 160 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil,
|
| 161 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 162 |
+
WithStrategy(StrategyReplace), WithMinWalThreshold(0))
|
| 163 |
+
require.Nil(t, err)
|
| 164 |
+
|
| 165 |
+
// so big it effectively never triggers as part of this test
|
| 166 |
+
b.SetMemtableThreshold(1e9)
|
| 167 |
+
|
| 168 |
+
bRec = b
|
| 169 |
+
})
|
| 170 |
+
|
| 171 |
+
t.Run("verify all data is present", func(t *testing.T) {
|
| 172 |
+
key1 := []byte("key-1")
|
| 173 |
+
key2 := []byte("key-2")
|
| 174 |
+
key3 := []byte("key-3")
|
| 175 |
+
orig1 := []byte("original value for key1")
|
| 176 |
+
updated3 := []byte("updated value for key 3")
|
| 177 |
+
res, err := bRec.Get(key1)
|
| 178 |
+
require.Nil(t, err)
|
| 179 |
+
assert.Equal(t, res, orig1)
|
| 180 |
+
res, err = bRec.Get(key2)
|
| 181 |
+
require.Nil(t, err)
|
| 182 |
+
assert.Nil(t, res)
|
| 183 |
+
res, err = bRec.Get(key3)
|
| 184 |
+
require.Nil(t, err)
|
| 185 |
+
assert.Equal(t, res, updated3)
|
| 186 |
+
})
|
| 187 |
+
})
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
func TestReplaceStrategy_RecoverFromWALWithCorruptLastElement(t *testing.T) {
|
| 191 |
+
dirNameOriginal := t.TempDir()
|
| 192 |
+
dirNameRecovered := t.TempDir()
|
| 193 |
+
|
| 194 |
+
t.Run("without previous state", func(t *testing.T) {
|
| 195 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 196 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 197 |
+
WithStrategy(StrategyReplace))
|
| 198 |
+
require.Nil(t, err)
|
| 199 |
+
|
| 200 |
+
// so big it effectively never triggers as part of this test
|
| 201 |
+
b.SetMemtableThreshold(1e9)
|
| 202 |
+
|
| 203 |
+
t.Run("set original values", func(t *testing.T) {
|
| 204 |
+
key1 := []byte("key-1")
|
| 205 |
+
key2 := []byte("key-2")
|
| 206 |
+
key3 := []byte("key-3")
|
| 207 |
+
orig1 := []byte("original value for key1")
|
| 208 |
+
orig2 := []byte("original value for key2")
|
| 209 |
+
orig3 := []byte("original value for key3")
|
| 210 |
+
|
| 211 |
+
err = b.Put(key1, orig1)
|
| 212 |
+
require.Nil(t, err)
|
| 213 |
+
err = b.Put(key2, orig2)
|
| 214 |
+
require.Nil(t, err)
|
| 215 |
+
err = b.Put(key3, orig3)
|
| 216 |
+
require.Nil(t, err)
|
| 217 |
+
})
|
| 218 |
+
|
| 219 |
+
t.Run("delete one, update one", func(t *testing.T) {
|
| 220 |
+
key2 := []byte("key-2")
|
| 221 |
+
key3 := []byte("key-3")
|
| 222 |
+
updated3 := []byte("updated value for key 3")
|
| 223 |
+
|
| 224 |
+
err = b.Delete(key2)
|
| 225 |
+
require.Nil(t, err)
|
| 226 |
+
|
| 227 |
+
err = b.Put(key3, updated3)
|
| 228 |
+
require.Nil(t, err)
|
| 229 |
+
})
|
| 230 |
+
|
| 231 |
+
t.Run("verify control", func(t *testing.T) {
|
| 232 |
+
key1 := []byte("key-1")
|
| 233 |
+
key2 := []byte("key-2")
|
| 234 |
+
key3 := []byte("key-3")
|
| 235 |
+
orig1 := []byte("original value for key1")
|
| 236 |
+
updated3 := []byte("updated value for key 3")
|
| 237 |
+
res, err := b.Get(key1)
|
| 238 |
+
require.Nil(t, err)
|
| 239 |
+
assert.Equal(t, res, orig1)
|
| 240 |
+
res, err = b.Get(key2)
|
| 241 |
+
require.Nil(t, err)
|
| 242 |
+
assert.Nil(t, res)
|
| 243 |
+
res, err = b.Get(key3)
|
| 244 |
+
require.Nil(t, err)
|
| 245 |
+
assert.Equal(t, res, updated3)
|
| 246 |
+
})
|
| 247 |
+
|
| 248 |
+
t.Run("make sure the WAL is flushed", func(t *testing.T) {
|
| 249 |
+
require.Nil(t, b.WriteWAL())
|
| 250 |
+
})
|
| 251 |
+
|
| 252 |
+
t.Run("copy state into recovery folder and destroy original", func(t *testing.T) {
|
| 253 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s",
|
| 254 |
+
dirNameOriginal, dirNameRecovered))
|
| 255 |
+
var out bytes.Buffer
|
| 256 |
+
cmd.Stderr = &out
|
| 257 |
+
err := cmd.Run()
|
| 258 |
+
if err != nil {
|
| 259 |
+
fmt.Println(out.String())
|
| 260 |
+
t.Fatal(err)
|
| 261 |
+
}
|
| 262 |
+
b = nil
|
| 263 |
+
require.Nil(t, os.RemoveAll(dirNameOriginal))
|
| 264 |
+
})
|
| 265 |
+
|
| 266 |
+
t.Run("corrupt WAL by removing some bytes at the very end", func(t *testing.T) {
|
| 267 |
+
entries, err := os.ReadDir(dirNameRecovered)
|
| 268 |
+
require.Nil(t, err)
|
| 269 |
+
require.Len(t, entries, 1, "there should be exactly one .wal file")
|
| 270 |
+
|
| 271 |
+
oldFileName := filepath.Join(dirNameRecovered, entries[0].Name())
|
| 272 |
+
tmpFileName := oldFileName + ".tmp"
|
| 273 |
+
|
| 274 |
+
err = os.Rename(oldFileName, tmpFileName)
|
| 275 |
+
require.Nil(t, err)
|
| 276 |
+
|
| 277 |
+
orig, err := os.Open(tmpFileName)
|
| 278 |
+
require.Nil(t, err)
|
| 279 |
+
|
| 280 |
+
correctLog, err := io.ReadAll(orig)
|
| 281 |
+
require.Nil(t, err)
|
| 282 |
+
err = orig.Close()
|
| 283 |
+
require.Nil(t, err)
|
| 284 |
+
|
| 285 |
+
corruptLog := correctLog[:len(correctLog)-6]
|
| 286 |
+
|
| 287 |
+
err = os.Remove(tmpFileName)
|
| 288 |
+
require.Nil(t, err)
|
| 289 |
+
|
| 290 |
+
corrupt, err := os.Create(oldFileName)
|
| 291 |
+
require.Nil(t, err)
|
| 292 |
+
|
| 293 |
+
_, err = corrupt.Write(corruptLog)
|
| 294 |
+
require.Nil(t, err)
|
| 295 |
+
|
| 296 |
+
err = corrupt.Close()
|
| 297 |
+
require.Nil(t, err)
|
| 298 |
+
})
|
| 299 |
+
|
| 300 |
+
var bRec *Bucket
|
| 301 |
+
|
| 302 |
+
t.Run("create new bucket from existing state", func(t *testing.T) {
|
| 303 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil,
|
| 304 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 305 |
+
WithStrategy(StrategyReplace))
|
| 306 |
+
require.Nil(t, err)
|
| 307 |
+
|
| 308 |
+
// so big it effectively never triggers as part of this test
|
| 309 |
+
b.SetMemtableThreshold(1e9)
|
| 310 |
+
|
| 311 |
+
bRec = b
|
| 312 |
+
})
|
| 313 |
+
|
| 314 |
+
t.Run("verify all data prior to the corruption is present", func(t *testing.T) {
|
| 315 |
+
key1 := []byte("key-1")
|
| 316 |
+
key2 := []byte("key-2")
|
| 317 |
+
key3 := []byte("key-3")
|
| 318 |
+
orig1 := []byte("original value for key1")
|
| 319 |
+
notUpdated3 := []byte("original value for key3")
|
| 320 |
+
|
| 321 |
+
// the last operation we performed (that now got corrupted) was an update
|
| 322 |
+
// on key3. So now that we're expecting all state prior to the corruption
|
| 323 |
+
// to be present, we would expect the original value for key3
|
| 324 |
+
|
| 325 |
+
res, err := bRec.Get(key1)
|
| 326 |
+
require.Nil(t, err)
|
| 327 |
+
assert.Equal(t, res, orig1)
|
| 328 |
+
res, err = bRec.Get(key2)
|
| 329 |
+
require.Nil(t, err)
|
| 330 |
+
assert.Nil(t, res)
|
| 331 |
+
res, err = bRec.Get(key3)
|
| 332 |
+
require.Nil(t, err)
|
| 333 |
+
assert.Equal(t, res, notUpdated3)
|
| 334 |
+
})
|
| 335 |
+
})
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
func TestSetStrategy_RecoverFromWAL(t *testing.T) {
|
| 339 |
+
dirNameOriginal := t.TempDir()
|
| 340 |
+
dirNameRecovered := t.TempDir()
|
| 341 |
+
|
| 342 |
+
t.Run("without prior state", func(t *testing.T) {
|
| 343 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 344 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 345 |
+
WithStrategy(StrategySetCollection))
|
| 346 |
+
require.Nil(t, err)
|
| 347 |
+
|
| 348 |
+
// so big it effectively never triggers as part of this test
|
| 349 |
+
b.SetMemtableThreshold(1e9)
|
| 350 |
+
|
| 351 |
+
key1 := []byte("test1-key-1")
|
| 352 |
+
key2 := []byte("test1-key-2")
|
| 353 |
+
key3 := []byte("test1-key-3")
|
| 354 |
+
|
| 355 |
+
t.Run("set original values and verify", func(t *testing.T) {
|
| 356 |
+
orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")}
|
| 357 |
+
orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")}
|
| 358 |
+
orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")}
|
| 359 |
+
|
| 360 |
+
err = b.SetAdd(key1, orig1)
|
| 361 |
+
require.Nil(t, err)
|
| 362 |
+
err = b.SetAdd(key2, orig2)
|
| 363 |
+
require.Nil(t, err)
|
| 364 |
+
err = b.SetAdd(key3, orig3)
|
| 365 |
+
require.Nil(t, err)
|
| 366 |
+
|
| 367 |
+
res, err := b.SetList(key1)
|
| 368 |
+
require.Nil(t, err)
|
| 369 |
+
assert.Equal(t, orig1, res)
|
| 370 |
+
res, err = b.SetList(key2)
|
| 371 |
+
require.Nil(t, err)
|
| 372 |
+
assert.Equal(t, orig2, res)
|
| 373 |
+
res, err = b.SetList(key3)
|
| 374 |
+
require.Nil(t, err)
|
| 375 |
+
assert.Equal(t, orig3, res)
|
| 376 |
+
})
|
| 377 |
+
|
| 378 |
+
t.Run("delete individual keys", func(t *testing.T) {
|
| 379 |
+
delete2 := []byte("value 2.1")
|
| 380 |
+
delete3 := []byte("value 3.2")
|
| 381 |
+
|
| 382 |
+
err = b.SetDeleteSingle(key2, delete2)
|
| 383 |
+
require.Nil(t, err)
|
| 384 |
+
err = b.SetDeleteSingle(key3, delete3)
|
| 385 |
+
require.Nil(t, err)
|
| 386 |
+
})
|
| 387 |
+
|
| 388 |
+
t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) {
|
| 389 |
+
readd2 := [][]byte{[]byte("value 2.1"), []byte("value 2.3")}
|
| 390 |
+
readd3 := [][]byte{[]byte("value 3.2"), []byte("value 3.3")}
|
| 391 |
+
|
| 392 |
+
err = b.SetAdd(key2, readd2)
|
| 393 |
+
require.Nil(t, err)
|
| 394 |
+
err = b.SetAdd(key3, readd3)
|
| 395 |
+
require.Nil(t, err)
|
| 396 |
+
})
|
| 397 |
+
|
| 398 |
+
t.Run("validate the results prior to recovery", func(t *testing.T) {
|
| 399 |
+
expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged
|
| 400 |
+
expected2 := [][]byte{
|
| 401 |
+
[]byte("value 2.2"), // from original import
|
| 402 |
+
[]byte("value 2.1"), // added again after initial deletion
|
| 403 |
+
[]byte("value 2.3"), // newly added
|
| 404 |
+
}
|
| 405 |
+
expected3 := [][]byte{
|
| 406 |
+
[]byte("value 3.1"), // form original import
|
| 407 |
+
[]byte("value 3.2"), // added again after initial deletion
|
| 408 |
+
[]byte("value 3.3"), // newly added
|
| 409 |
+
} // value2 deleted
|
| 410 |
+
|
| 411 |
+
res, err := b.SetList(key1)
|
| 412 |
+
require.Nil(t, err)
|
| 413 |
+
assert.Equal(t, expected1, res)
|
| 414 |
+
res, err = b.SetList(key2)
|
| 415 |
+
require.Nil(t, err)
|
| 416 |
+
assert.Equal(t, expected2, res)
|
| 417 |
+
res, err = b.SetList(key3)
|
| 418 |
+
require.Nil(t, err)
|
| 419 |
+
assert.Equal(t, expected3, res)
|
| 420 |
+
})
|
| 421 |
+
|
| 422 |
+
t.Run("make sure the WAL is flushed", func(t *testing.T) {
|
| 423 |
+
require.Nil(t, b.WriteWAL())
|
| 424 |
+
})
|
| 425 |
+
|
| 426 |
+
t.Run("copy state into recovery folder and destroy original", func(t *testing.T) {
|
| 427 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s",
|
| 428 |
+
dirNameOriginal, dirNameRecovered))
|
| 429 |
+
var out bytes.Buffer
|
| 430 |
+
cmd.Stderr = &out
|
| 431 |
+
err := cmd.Run()
|
| 432 |
+
if err != nil {
|
| 433 |
+
fmt.Println(out.String())
|
| 434 |
+
t.Fatal(err)
|
| 435 |
+
}
|
| 436 |
+
b = nil
|
| 437 |
+
require.Nil(t, os.RemoveAll(dirNameOriginal))
|
| 438 |
+
})
|
| 439 |
+
|
| 440 |
+
var bRec *Bucket
|
| 441 |
+
|
| 442 |
+
t.Run("create new bucket from existing state", func(t *testing.T) {
|
| 443 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil,
|
| 444 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 445 |
+
WithStrategy(StrategySetCollection))
|
| 446 |
+
require.Nil(t, err)
|
| 447 |
+
|
| 448 |
+
// so big it effectively never triggers as part of this test
|
| 449 |
+
b.SetMemtableThreshold(1e9)
|
| 450 |
+
|
| 451 |
+
bRec = b
|
| 452 |
+
})
|
| 453 |
+
|
| 454 |
+
t.Run("validate the results after recovery", func(t *testing.T) {
|
| 455 |
+
expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged
|
| 456 |
+
expected2 := [][]byte{
|
| 457 |
+
[]byte("value 2.2"), // from original import
|
| 458 |
+
[]byte("value 2.1"), // added again after initial deletion
|
| 459 |
+
[]byte("value 2.3"), // newly added
|
| 460 |
+
}
|
| 461 |
+
expected3 := [][]byte{
|
| 462 |
+
[]byte("value 3.1"), // form original import
|
| 463 |
+
[]byte("value 3.2"), // added again after initial deletion
|
| 464 |
+
[]byte("value 3.3"), // newly added
|
| 465 |
+
} // value2 deleted
|
| 466 |
+
|
| 467 |
+
res, err := bRec.SetList(key1)
|
| 468 |
+
require.Nil(t, err)
|
| 469 |
+
assert.Equal(t, expected1, res)
|
| 470 |
+
res, err = bRec.SetList(key2)
|
| 471 |
+
require.Nil(t, err)
|
| 472 |
+
assert.Equal(t, expected2, res)
|
| 473 |
+
res, err = bRec.SetList(key3)
|
| 474 |
+
require.Nil(t, err)
|
| 475 |
+
assert.Equal(t, expected3, res)
|
| 476 |
+
})
|
| 477 |
+
})
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
func TestRoaringSetStrategy_RecoverFromWAL(t *testing.T) {
|
| 481 |
+
dirNameOriginal := t.TempDir()
|
| 482 |
+
dirNameRecovered := t.TempDir()
|
| 483 |
+
|
| 484 |
+
t.Run("without prior state", func(t *testing.T) {
|
| 485 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 486 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 487 |
+
WithStrategy(StrategyRoaringSet), WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()))
|
| 488 |
+
require.Nil(t, err)
|
| 489 |
+
|
| 490 |
+
key1 := []byte("test1-key-1")
|
| 491 |
+
key2 := []byte("test1-key-2")
|
| 492 |
+
key3 := []byte("test1-key-3")
|
| 493 |
+
|
| 494 |
+
t.Run("set original values and verify", func(t *testing.T) {
|
| 495 |
+
orig1 := []uint64{11, 12}
|
| 496 |
+
orig2 := []uint64{21, 22}
|
| 497 |
+
orig3 := []uint64{31, 32}
|
| 498 |
+
|
| 499 |
+
err = b.RoaringSetAddList(key1, orig1)
|
| 500 |
+
require.NoError(t, err)
|
| 501 |
+
err = b.RoaringSetAddList(key2, orig2)
|
| 502 |
+
require.NoError(t, err)
|
| 503 |
+
err = b.RoaringSetAddList(key3, orig3)
|
| 504 |
+
require.NoError(t, err)
|
| 505 |
+
|
| 506 |
+
bm1, release, err := b.RoaringSetGet(key1)
|
| 507 |
+
require.NoError(t, err)
|
| 508 |
+
defer release()
|
| 509 |
+
assert.ElementsMatch(t, orig1, bm1.ToArray())
|
| 510 |
+
|
| 511 |
+
bm2, release, err := b.RoaringSetGet(key2)
|
| 512 |
+
require.NoError(t, err)
|
| 513 |
+
defer release()
|
| 514 |
+
assert.ElementsMatch(t, orig2, bm2.ToArray())
|
| 515 |
+
|
| 516 |
+
bm3, release, err := b.RoaringSetGet(key3)
|
| 517 |
+
require.NoError(t, err)
|
| 518 |
+
defer release()
|
| 519 |
+
assert.ElementsMatch(t, orig3, bm3.ToArray())
|
| 520 |
+
})
|
| 521 |
+
|
| 522 |
+
t.Run("delete individual keys", func(t *testing.T) {
|
| 523 |
+
delete2 := uint64(21)
|
| 524 |
+
delete3 := uint64(32)
|
| 525 |
+
|
| 526 |
+
err = b.RoaringSetRemoveOne(key2, delete2)
|
| 527 |
+
require.NoError(t, err)
|
| 528 |
+
err = b.RoaringSetRemoveOne(key3, delete3)
|
| 529 |
+
require.NoError(t, err)
|
| 530 |
+
})
|
| 531 |
+
|
| 532 |
+
t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) {
|
| 533 |
+
reAdd2 := []uint64{21, 23}
|
| 534 |
+
reAdd3 := []uint64{31, 33}
|
| 535 |
+
|
| 536 |
+
err = b.RoaringSetAddList(key2, reAdd2)
|
| 537 |
+
require.NoError(t, err)
|
| 538 |
+
err = b.RoaringSetAddList(key3, reAdd3)
|
| 539 |
+
require.NoError(t, err)
|
| 540 |
+
})
|
| 541 |
+
|
| 542 |
+
t.Run("validate the results prior to recovery", func(t *testing.T) {
|
| 543 |
+
expected1 := []uint64{11, 12} // unchanged
|
| 544 |
+
expected2 := []uint64{
|
| 545 |
+
22, // from original import
|
| 546 |
+
21, // added again after initial deletion
|
| 547 |
+
23, // newly added
|
| 548 |
+
}
|
| 549 |
+
expected3 := []uint64{
|
| 550 |
+
31, // form original import
|
| 551 |
+
33, // newly added
|
| 552 |
+
} // 32 deleted
|
| 553 |
+
|
| 554 |
+
bm1, release, err := b.RoaringSetGet(key1)
|
| 555 |
+
require.NoError(t, err)
|
| 556 |
+
defer release()
|
| 557 |
+
assert.ElementsMatch(t, expected1, bm1.ToArray())
|
| 558 |
+
|
| 559 |
+
bm2, release, err := b.RoaringSetGet(key2)
|
| 560 |
+
require.NoError(t, err)
|
| 561 |
+
defer release()
|
| 562 |
+
assert.ElementsMatch(t, expected2, bm2.ToArray())
|
| 563 |
+
|
| 564 |
+
bm3, release, err := b.RoaringSetGet(key3)
|
| 565 |
+
require.NoError(t, err)
|
| 566 |
+
defer release()
|
| 567 |
+
assert.ElementsMatch(t, expected3, bm3.ToArray())
|
| 568 |
+
})
|
| 569 |
+
|
| 570 |
+
t.Run("make sure the WAL is flushed", func(t *testing.T) {
|
| 571 |
+
require.Nil(t, b.WriteWAL())
|
| 572 |
+
})
|
| 573 |
+
|
| 574 |
+
t.Run("copy state into recovery folder and destroy original", func(t *testing.T) {
|
| 575 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s",
|
| 576 |
+
dirNameOriginal, dirNameRecovered))
|
| 577 |
+
var out bytes.Buffer
|
| 578 |
+
cmd.Stderr = &out
|
| 579 |
+
err := cmd.Run()
|
| 580 |
+
if err != nil {
|
| 581 |
+
fmt.Println(out.String())
|
| 582 |
+
t.Fatal(err)
|
| 583 |
+
}
|
| 584 |
+
b = nil
|
| 585 |
+
require.Nil(t, os.RemoveAll(dirNameOriginal))
|
| 586 |
+
})
|
| 587 |
+
|
| 588 |
+
var bRec *Bucket
|
| 589 |
+
|
| 590 |
+
t.Run("create new bucket from existing state", func(t *testing.T) {
|
| 591 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil,
|
| 592 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 593 |
+
WithStrategy(StrategyRoaringSet), WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()))
|
| 594 |
+
require.Nil(t, err)
|
| 595 |
+
|
| 596 |
+
bRec = b
|
| 597 |
+
})
|
| 598 |
+
|
| 599 |
+
t.Run("validate the results after recovery", func(t *testing.T) {
|
| 600 |
+
expected1 := []uint64{11, 12} // unchanged
|
| 601 |
+
expected2 := []uint64{
|
| 602 |
+
22, // from original import
|
| 603 |
+
21, // added again after initial deletion
|
| 604 |
+
23, // newly added
|
| 605 |
+
}
|
| 606 |
+
expected3 := []uint64{
|
| 607 |
+
31, // form original import
|
| 608 |
+
33, // newly added
|
| 609 |
+
} // 32 deleted
|
| 610 |
+
|
| 611 |
+
bm1, release, err := bRec.RoaringSetGet(key1)
|
| 612 |
+
require.NoError(t, err)
|
| 613 |
+
defer release()
|
| 614 |
+
assert.ElementsMatch(t, expected1, bm1.ToArray())
|
| 615 |
+
|
| 616 |
+
bm2, release, err := bRec.RoaringSetGet(key2)
|
| 617 |
+
require.NoError(t, err)
|
| 618 |
+
defer release()
|
| 619 |
+
assert.ElementsMatch(t, expected2, bm2.ToArray())
|
| 620 |
+
|
| 621 |
+
bm3, release, err := bRec.RoaringSetGet(key3)
|
| 622 |
+
require.NoError(t, err)
|
| 623 |
+
defer release()
|
| 624 |
+
assert.ElementsMatch(t, expected3, bm3.ToArray())
|
| 625 |
+
})
|
| 626 |
+
})
|
| 627 |
+
}
|
| 628 |
+
|
| 629 |
+
func TestRoaringSetRangeStrategy_RecoverFromWAL(t *testing.T) {
|
| 630 |
+
dirNameOriginal := t.TempDir()
|
| 631 |
+
dirNameRecovered := t.TempDir()
|
| 632 |
+
|
| 633 |
+
t.Run("without prior state", func(t *testing.T) {
|
| 634 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 635 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 636 |
+
WithStrategy(StrategyRoaringSetRange))
|
| 637 |
+
require.Nil(t, err)
|
| 638 |
+
|
| 639 |
+
key1 := uint64(1)
|
| 640 |
+
key2 := uint64(2)
|
| 641 |
+
key3 := uint64(3)
|
| 642 |
+
|
| 643 |
+
t.Run("set original values and verify", func(t *testing.T) {
|
| 644 |
+
orig1 := []uint64{11, 12}
|
| 645 |
+
orig2 := []uint64{21, 22}
|
| 646 |
+
orig3 := []uint64{31, 32}
|
| 647 |
+
|
| 648 |
+
err = b.RoaringSetRangeAdd(key1, orig1...)
|
| 649 |
+
require.NoError(t, err)
|
| 650 |
+
err = b.RoaringSetRangeAdd(key2, orig2...)
|
| 651 |
+
require.NoError(t, err)
|
| 652 |
+
err = b.RoaringSetRangeAdd(key3, orig3...)
|
| 653 |
+
require.NoError(t, err)
|
| 654 |
+
|
| 655 |
+
reader := b.ReaderRoaringSetRange()
|
| 656 |
+
defer reader.Close()
|
| 657 |
+
|
| 658 |
+
bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual)
|
| 659 |
+
require.NoError(t, err)
|
| 660 |
+
defer release1()
|
| 661 |
+
assert.ElementsMatch(t, orig1, bm1.ToArray())
|
| 662 |
+
|
| 663 |
+
bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual)
|
| 664 |
+
require.NoError(t, err)
|
| 665 |
+
defer release2()
|
| 666 |
+
assert.ElementsMatch(t, orig2, bm2.ToArray())
|
| 667 |
+
|
| 668 |
+
bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual)
|
| 669 |
+
require.NoError(t, err)
|
| 670 |
+
defer release3()
|
| 671 |
+
assert.ElementsMatch(t, orig3, bm3.ToArray())
|
| 672 |
+
})
|
| 673 |
+
|
| 674 |
+
t.Run("delete individual keys", func(t *testing.T) {
|
| 675 |
+
delete2 := uint64(21)
|
| 676 |
+
delete3 := uint64(32)
|
| 677 |
+
|
| 678 |
+
err = b.RoaringSetRangeRemove(key2, delete2)
|
| 679 |
+
require.NoError(t, err)
|
| 680 |
+
err = b.RoaringSetRangeRemove(key3, delete3)
|
| 681 |
+
require.NoError(t, err)
|
| 682 |
+
})
|
| 683 |
+
|
| 684 |
+
t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) {
|
| 685 |
+
reAdd2 := []uint64{21, 23}
|
| 686 |
+
reAdd3 := []uint64{31, 33}
|
| 687 |
+
|
| 688 |
+
err = b.RoaringSetRangeAdd(key2, reAdd2...)
|
| 689 |
+
require.NoError(t, err)
|
| 690 |
+
err = b.RoaringSetRangeAdd(key3, reAdd3...)
|
| 691 |
+
require.NoError(t, err)
|
| 692 |
+
})
|
| 693 |
+
|
| 694 |
+
t.Run("validate the results prior to recovery", func(t *testing.T) {
|
| 695 |
+
expected1 := []uint64{11, 12} // unchanged
|
| 696 |
+
expected2 := []uint64{
|
| 697 |
+
22, // from original import
|
| 698 |
+
21, // added again after initial deletion
|
| 699 |
+
23, // newly added
|
| 700 |
+
}
|
| 701 |
+
expected3 := []uint64{
|
| 702 |
+
31, // form original import
|
| 703 |
+
33, // newly added
|
| 704 |
+
} // 32 deleted
|
| 705 |
+
|
| 706 |
+
reader := b.ReaderRoaringSetRange()
|
| 707 |
+
defer reader.Close()
|
| 708 |
+
|
| 709 |
+
bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual)
|
| 710 |
+
require.NoError(t, err)
|
| 711 |
+
defer release1()
|
| 712 |
+
assert.ElementsMatch(t, expected1, bm1.ToArray())
|
| 713 |
+
|
| 714 |
+
bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual)
|
| 715 |
+
require.NoError(t, err)
|
| 716 |
+
defer release2()
|
| 717 |
+
assert.ElementsMatch(t, expected2, bm2.ToArray())
|
| 718 |
+
|
| 719 |
+
bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual)
|
| 720 |
+
require.NoError(t, err)
|
| 721 |
+
defer release3()
|
| 722 |
+
assert.ElementsMatch(t, expected3, bm3.ToArray())
|
| 723 |
+
})
|
| 724 |
+
|
| 725 |
+
t.Run("make sure the WAL is flushed", func(t *testing.T) {
|
| 726 |
+
require.Nil(t, b.WriteWAL())
|
| 727 |
+
})
|
| 728 |
+
|
| 729 |
+
t.Run("copy state into recovery folder and destroy original", func(t *testing.T) {
|
| 730 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s",
|
| 731 |
+
dirNameOriginal, dirNameRecovered))
|
| 732 |
+
var out bytes.Buffer
|
| 733 |
+
cmd.Stderr = &out
|
| 734 |
+
err := cmd.Run()
|
| 735 |
+
if err != nil {
|
| 736 |
+
fmt.Println(out.String())
|
| 737 |
+
t.Fatal(err)
|
| 738 |
+
}
|
| 739 |
+
b = nil
|
| 740 |
+
require.Nil(t, os.RemoveAll(dirNameOriginal))
|
| 741 |
+
})
|
| 742 |
+
|
| 743 |
+
var bRec *Bucket
|
| 744 |
+
|
| 745 |
+
t.Run("create new bucket from existing state", func(t *testing.T) {
|
| 746 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil,
|
| 747 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 748 |
+
WithStrategy(StrategyRoaringSetRange))
|
| 749 |
+
require.Nil(t, err)
|
| 750 |
+
|
| 751 |
+
bRec = b
|
| 752 |
+
})
|
| 753 |
+
|
| 754 |
+
t.Run("validate the results after recovery", func(t *testing.T) {
|
| 755 |
+
expected1 := []uint64{11, 12} // unchanged
|
| 756 |
+
expected2 := []uint64{
|
| 757 |
+
22, // from original import
|
| 758 |
+
21, // added again after initial deletion
|
| 759 |
+
23, // newly added
|
| 760 |
+
}
|
| 761 |
+
expected3 := []uint64{
|
| 762 |
+
31, // form original import
|
| 763 |
+
33, // newly added
|
| 764 |
+
} // 32 deleted
|
| 765 |
+
|
| 766 |
+
reader := bRec.ReaderRoaringSetRange()
|
| 767 |
+
defer reader.Close()
|
| 768 |
+
|
| 769 |
+
bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual)
|
| 770 |
+
require.NoError(t, err)
|
| 771 |
+
defer release1()
|
| 772 |
+
assert.ElementsMatch(t, expected1, bm1.ToArray())
|
| 773 |
+
|
| 774 |
+
bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual)
|
| 775 |
+
require.NoError(t, err)
|
| 776 |
+
defer release2()
|
| 777 |
+
assert.ElementsMatch(t, expected2, bm2.ToArray())
|
| 778 |
+
|
| 779 |
+
bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual)
|
| 780 |
+
require.NoError(t, err)
|
| 781 |
+
defer release3()
|
| 782 |
+
assert.ElementsMatch(t, expected3, bm3.ToArray())
|
| 783 |
+
})
|
| 784 |
+
})
|
| 785 |
+
}
|
| 786 |
+
|
| 787 |
+
func TestMapStrategy_RecoverFromWAL(t *testing.T) {
|
| 788 |
+
dirNameOriginal := t.TempDir()
|
| 789 |
+
dirNameRecovered := t.TempDir()
|
| 790 |
+
|
| 791 |
+
t.Run("without prior state", func(t *testing.T) {
|
| 792 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil,
|
| 793 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 794 |
+
WithStrategy(StrategyMapCollection))
|
| 795 |
+
require.Nil(t, err)
|
| 796 |
+
|
| 797 |
+
// so big it effectively never triggers as part of this test
|
| 798 |
+
b.SetMemtableThreshold(1e9)
|
| 799 |
+
|
| 800 |
+
rowKey1 := []byte("test1-key-1")
|
| 801 |
+
rowKey2 := []byte("test1-key-2")
|
| 802 |
+
|
| 803 |
+
t.Run("set original values and verify", func(t *testing.T) {
|
| 804 |
+
row1Map := []MapPair{
|
| 805 |
+
{
|
| 806 |
+
Key: []byte("row1-key1"),
|
| 807 |
+
Value: []byte("row1-key1-value1"),
|
| 808 |
+
}, {
|
| 809 |
+
Key: []byte("row1-key2"),
|
| 810 |
+
Value: []byte("row1-key2-value1"),
|
| 811 |
+
},
|
| 812 |
+
}
|
| 813 |
+
|
| 814 |
+
row2Map := []MapPair{
|
| 815 |
+
{
|
| 816 |
+
Key: []byte("row2-key1"),
|
| 817 |
+
Value: []byte("row2-key1-value1"),
|
| 818 |
+
}, {
|
| 819 |
+
Key: []byte("row2-key2"),
|
| 820 |
+
Value: []byte("row2-key2-value1"),
|
| 821 |
+
},
|
| 822 |
+
}
|
| 823 |
+
|
| 824 |
+
for _, pair := range row1Map {
|
| 825 |
+
err = b.MapSet(rowKey1, pair)
|
| 826 |
+
require.Nil(t, err)
|
| 827 |
+
}
|
| 828 |
+
|
| 829 |
+
for _, pair := range row2Map {
|
| 830 |
+
err = b.MapSet(rowKey2, pair)
|
| 831 |
+
require.Nil(t, err)
|
| 832 |
+
}
|
| 833 |
+
|
| 834 |
+
res, err := b.MapList(context.Background(), rowKey1)
|
| 835 |
+
require.Nil(t, err)
|
| 836 |
+
assert.Equal(t, row1Map, res)
|
| 837 |
+
res, err = b.MapList(context.Background(), rowKey2)
|
| 838 |
+
require.Nil(t, err)
|
| 839 |
+
assert.Equal(t, res, row2Map)
|
| 840 |
+
})
|
| 841 |
+
|
| 842 |
+
t.Run("replace an existing map key", func(t *testing.T) {
|
| 843 |
+
err = b.MapSet(rowKey1, MapPair{
|
| 844 |
+
Key: []byte("row1-key1"), // existing key
|
| 845 |
+
Value: []byte("row1-key1-value2"), // updated value
|
| 846 |
+
})
|
| 847 |
+
require.Nil(t, err)
|
| 848 |
+
|
| 849 |
+
row1Updated := []MapPair{
|
| 850 |
+
{
|
| 851 |
+
Key: []byte("row1-key1"),
|
| 852 |
+
Value: []byte("row1-key1-value2"), // <--- updated, rest unchanged
|
| 853 |
+
}, {
|
| 854 |
+
Key: []byte("row1-key2"),
|
| 855 |
+
Value: []byte("row1-key2-value1"),
|
| 856 |
+
},
|
| 857 |
+
}
|
| 858 |
+
|
| 859 |
+
row2Unchanged := []MapPair{
|
| 860 |
+
{
|
| 861 |
+
Key: []byte("row2-key1"),
|
| 862 |
+
Value: []byte("row2-key1-value1"),
|
| 863 |
+
}, {
|
| 864 |
+
Key: []byte("row2-key2"),
|
| 865 |
+
Value: []byte("row2-key2-value1"),
|
| 866 |
+
},
|
| 867 |
+
}
|
| 868 |
+
|
| 869 |
+
res, err := b.MapList(context.Background(), rowKey1)
|
| 870 |
+
require.Nil(t, err)
|
| 871 |
+
assert.Equal(t, row1Updated, res)
|
| 872 |
+
res, err = b.MapList(context.Background(), rowKey2)
|
| 873 |
+
require.Nil(t, err)
|
| 874 |
+
assert.Equal(t, res, row2Unchanged)
|
| 875 |
+
})
|
| 876 |
+
|
| 877 |
+
t.Run("validate the results prior to recovery", func(t *testing.T) {
|
| 878 |
+
rowKey1 := []byte("test1-key-1")
|
| 879 |
+
rowKey2 := []byte("test1-key-2")
|
| 880 |
+
|
| 881 |
+
expectedRow1 := []MapPair{
|
| 882 |
+
{
|
| 883 |
+
Key: []byte("row1-key1"),
|
| 884 |
+
Value: []byte("row1-key1-value2"),
|
| 885 |
+
}, {
|
| 886 |
+
Key: []byte("row1-key2"),
|
| 887 |
+
Value: []byte("row1-key2-value1"),
|
| 888 |
+
},
|
| 889 |
+
}
|
| 890 |
+
|
| 891 |
+
expectedRow2 := []MapPair{
|
| 892 |
+
{
|
| 893 |
+
Key: []byte("row2-key1"),
|
| 894 |
+
Value: []byte("row2-key1-value1"),
|
| 895 |
+
}, {
|
| 896 |
+
Key: []byte("row2-key2"),
|
| 897 |
+
Value: []byte("row2-key2-value1"),
|
| 898 |
+
},
|
| 899 |
+
}
|
| 900 |
+
|
| 901 |
+
res, err := b.MapList(context.Background(), rowKey1)
|
| 902 |
+
require.Nil(t, err)
|
| 903 |
+
assert.Equal(t, expectedRow1, res)
|
| 904 |
+
res, err = b.MapList(context.Background(), rowKey2)
|
| 905 |
+
require.Nil(t, err)
|
| 906 |
+
assert.Equal(t, expectedRow2, res)
|
| 907 |
+
})
|
| 908 |
+
|
| 909 |
+
t.Run("make sure the WAL is flushed", func(t *testing.T) {
|
| 910 |
+
require.Nil(t, b.WriteWAL())
|
| 911 |
+
})
|
| 912 |
+
|
| 913 |
+
t.Run("copy state into recovery folder and destroy original", func(t *testing.T) {
|
| 914 |
+
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s",
|
| 915 |
+
dirNameOriginal, dirNameRecovered))
|
| 916 |
+
var out bytes.Buffer
|
| 917 |
+
cmd.Stderr = &out
|
| 918 |
+
err := cmd.Run()
|
| 919 |
+
if err != nil {
|
| 920 |
+
fmt.Println(out.String())
|
| 921 |
+
t.Fatal(err)
|
| 922 |
+
}
|
| 923 |
+
b = nil
|
| 924 |
+
require.Nil(t, os.RemoveAll(dirNameOriginal))
|
| 925 |
+
})
|
| 926 |
+
|
| 927 |
+
var bRec *Bucket
|
| 928 |
+
|
| 929 |
+
t.Run("create new bucket from existing state", func(t *testing.T) {
|
| 930 |
+
b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil,
|
| 931 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 932 |
+
WithStrategy(StrategyMapCollection))
|
| 933 |
+
require.Nil(t, err)
|
| 934 |
+
|
| 935 |
+
// so big it effectively never triggers as part of this test
|
| 936 |
+
b.SetMemtableThreshold(1e9)
|
| 937 |
+
|
| 938 |
+
bRec = b
|
| 939 |
+
})
|
| 940 |
+
|
| 941 |
+
t.Run("validate the results after recovery", func(t *testing.T) {
|
| 942 |
+
rowKey1 := []byte("test1-key-1")
|
| 943 |
+
rowKey2 := []byte("test1-key-2")
|
| 944 |
+
|
| 945 |
+
expectedRow1 := []MapPair{
|
| 946 |
+
{
|
| 947 |
+
Key: []byte("row1-key1"),
|
| 948 |
+
Value: []byte("row1-key1-value2"),
|
| 949 |
+
}, {
|
| 950 |
+
Key: []byte("row1-key2"),
|
| 951 |
+
Value: []byte("row1-key2-value1"),
|
| 952 |
+
},
|
| 953 |
+
}
|
| 954 |
+
|
| 955 |
+
expectedRow2 := []MapPair{
|
| 956 |
+
{
|
| 957 |
+
Key: []byte("row2-key1"),
|
| 958 |
+
Value: []byte("row2-key1-value1"),
|
| 959 |
+
}, {
|
| 960 |
+
Key: []byte("row2-key2"),
|
| 961 |
+
Value: []byte("row2-key2-value1"),
|
| 962 |
+
},
|
| 963 |
+
}
|
| 964 |
+
|
| 965 |
+
res, err := bRec.MapList(context.Background(), rowKey1)
|
| 966 |
+
require.Nil(t, err)
|
| 967 |
+
assert.Equal(t, expectedRow1, res)
|
| 968 |
+
res, err = bRec.MapList(context.Background(), rowKey2)
|
| 969 |
+
require.Nil(t, err)
|
| 970 |
+
assert.Equal(t, expectedRow2, res)
|
| 971 |
+
})
|
| 972 |
+
})
|
| 973 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/red_black_tree_test.go
ADDED
|
@@ -0,0 +1,424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"crypto/rand"
|
| 16 |
+
"fmt"
|
| 17 |
+
"math"
|
| 18 |
+
"math/big"
|
| 19 |
+
"reflect"
|
| 20 |
+
"testing"
|
| 21 |
+
|
| 22 |
+
"github.com/stretchr/testify/require"
|
| 23 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/rbtree"
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
// Node colour aliases used in the expected-colour tables below:
// R marks a red node, B a black node.
const (
	R = true
	B = false
)

// This test adds keys to the RB tree. Afterwards the same nodes are added in the expected order, eg in the way
// the RB tree is expected to re-order the nodes
var rbTests = []struct {
	name string
	// keys is the insertion order fed to the tree under test
	keys []uint
	// ReorderedKeys holds the same keys in the order the rebalanced tree is
	// expected to store them (used to build a reference tree)
	ReorderedKeys  []uint
	expectedColors []bool // with respect to the original keys
}{
	{
		"Requires recoloring but no reordering",
		[]uint{61, 52, 83, 93},
		[]uint{61, 52, 83, 93},
		[]bool{B, B, B, R},
	},
	{
		"Requires left rotate around root",
		[]uint{61, 83, 99},
		[]uint{83, 61, 99},
		[]bool{R, B, R},
	},
	{
		"Requires left rotate with more nodes",
		[]uint{61, 52, 85, 93, 99},
		[]uint{61, 52, 93, 85, 99},
		[]bool{B, B, R, B, R},
	},
	{
		"Requires right and then left rotate",
		[]uint{61, 52, 85, 93, 87},
		[]uint{61, 52, 87, 85, 93},
		[]bool{B, B, R, R, B},
	},
	{
		"Requires right rotate around root",
		[]uint{61, 30, 10},
		[]uint{30, 10, 61},
		[]bool{R, B, R},
	},
	{
		"Requires right rotate with more nodes",
		[]uint{61, 52, 85, 21, 10},
		[]uint{61, 85, 21, 10, 52},
		[]bool{B, R, B, B, R},
	},
	{
		"Requires left and then right rotate",
		[]uint{61, 52, 85, 21, 36},
		[]uint{61, 85, 36, 21, 52},
		[]bool{B, R, B, R, B},
	},
	{
		"Require reordering for two nodes",
		[]uint{61, 52, 40, 85, 105, 110},
		[]uint{52, 40, 85, 61, 105, 110},
		[]bool{B, B, B, R, B, R},
	},
	{
		"Ordered nodes increasing",
		[]uint{1, 2, 3, 4, 5, 6, 7, 8},
		[]uint{4, 2, 6, 1, 3, 5, 7, 8},
		[]bool{B, R, B, B, B, R, B, R},
	},
	{
		"Ordered nodes decreasing",
		[]uint{8, 7, 6, 5, 4, 3, 2, 1},
		[]uint{5, 3, 7, 2, 4, 6, 8, 1},
		[]bool{B, R, B, B, B, R, B, R},
	},
	{
		"Multiple rotations along the tree and colour changes",
		[]uint{166, 92, 33, 133, 227, 236, 71, 183, 18, 139, 245, 161},
		[]uint{166, 92, 227, 33, 139, 183, 236, 18, 71, 133, 161, 245},
		[]bool{B, R, B, R, R, B, R, B, R, B, R, R},
	},
}
|
| 106 |
+
|
| 107 |
+
func TestRBTree(t *testing.T) {
|
| 108 |
+
for _, tt := range rbTests {
|
| 109 |
+
t.Run(tt.name, func(t *testing.T) {
|
| 110 |
+
tree := &binarySearchTree{}
|
| 111 |
+
for _, key := range tt.keys {
|
| 112 |
+
iByte := []byte{uint8(key)}
|
| 113 |
+
tree.insert(iByte, iByte, nil)
|
| 114 |
+
require.Empty(t, tree.root.parent)
|
| 115 |
+
}
|
| 116 |
+
validateRBTree(t, tree.root)
|
| 117 |
+
|
| 118 |
+
flattenTree := tree.flattenInOrder()
|
| 119 |
+
require.Equal(t, len(tt.keys), len(flattenTree)) // no entries got lost
|
| 120 |
+
|
| 121 |
+
// add tree with the same nodes in the "optimal" order to be able to compare their order afterwards
|
| 122 |
+
treeCorrectOrder := &binarySearchTree{}
|
| 123 |
+
for _, key := range tt.ReorderedKeys {
|
| 124 |
+
iByte := []byte{uint8(key)}
|
| 125 |
+
treeCorrectOrder.insert(iByte, iByte, nil)
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
flattenTreeInput := treeCorrectOrder.flattenInOrder()
|
| 129 |
+
for i := range flattenTree {
|
| 130 |
+
byteKey := flattenTree[i].key
|
| 131 |
+
originalIndex := getIndexInSlice(tt.keys, byteKey)
|
| 132 |
+
require.Equal(t, byteKey, flattenTreeInput[i].key)
|
| 133 |
+
require.Equal(t, flattenTree[i].colourIsRed, tt.expectedColors[originalIndex])
|
| 134 |
+
}
|
| 135 |
+
})
|
| 136 |
+
}
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
func TestRBTreeMap(t *testing.T) {
|
| 140 |
+
for _, tt := range rbTests {
|
| 141 |
+
t.Run(tt.name, func(t *testing.T) {
|
| 142 |
+
tree := &binarySearchTreeMap{}
|
| 143 |
+
for _, key := range tt.keys {
|
| 144 |
+
tree.insert([]byte{uint8(key)}, MapPair{
|
| 145 |
+
Key: []byte("map-key-1"),
|
| 146 |
+
Value: []byte("map-value-1"),
|
| 147 |
+
})
|
| 148 |
+
require.Empty(t, tree.root.parent)
|
| 149 |
+
}
|
| 150 |
+
validateRBTree(t, tree.root)
|
| 151 |
+
|
| 152 |
+
flatten_tree := tree.flattenInOrder()
|
| 153 |
+
require.Equal(t, len(tt.keys), len(flatten_tree)) // no entries got lost
|
| 154 |
+
|
| 155 |
+
// add tree with the same nodes in the "optimal" order to be able to compare their order afterwards
|
| 156 |
+
treeCorrectOrder := &binarySearchTreeMap{}
|
| 157 |
+
for _, key := range tt.ReorderedKeys {
|
| 158 |
+
treeCorrectOrder.insert([]byte{uint8(key)}, MapPair{
|
| 159 |
+
Key: []byte("map-key-1"),
|
| 160 |
+
Value: []byte("map-value-1"),
|
| 161 |
+
})
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
flatten_tree_input := treeCorrectOrder.flattenInOrder()
|
| 165 |
+
for i := range flatten_tree {
|
| 166 |
+
byte_key := flatten_tree[i].key
|
| 167 |
+
originalIndex := getIndexInSlice(tt.keys, byte_key)
|
| 168 |
+
require.Equal(t, byte_key, flatten_tree_input[i].key)
|
| 169 |
+
require.Equal(t, flatten_tree[i].colourIsRed, tt.expectedColors[originalIndex])
|
| 170 |
+
}
|
| 171 |
+
})
|
| 172 |
+
}
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
func TestRBTreeMulti(t *testing.T) {
|
| 176 |
+
for _, tt := range rbTests {
|
| 177 |
+
t.Run(tt.name, func(t *testing.T) {
|
| 178 |
+
tree := &binarySearchTreeMulti{}
|
| 179 |
+
for _, key := range tt.keys {
|
| 180 |
+
values := []value{}
|
| 181 |
+
for j := uint(0); j < 5; j++ {
|
| 182 |
+
values = append(values, value{value: []byte{uint8(key * j)}, tombstone: false})
|
| 183 |
+
}
|
| 184 |
+
tree.insert([]byte{uint8(key)}, values)
|
| 185 |
+
require.Empty(t, tree.root.parent)
|
| 186 |
+
}
|
| 187 |
+
validateRBTree(t, tree.root)
|
| 188 |
+
|
| 189 |
+
flatten_tree := tree.flattenInOrder()
|
| 190 |
+
require.Equal(t, len(tt.keys), len(flatten_tree)) // no entries got lost
|
| 191 |
+
|
| 192 |
+
// add tree with the same nodes in the "optimal" order to be able to compare their order afterwards
|
| 193 |
+
treeCorrectOrder := &binarySearchTreeMulti{}
|
| 194 |
+
for _, key := range tt.ReorderedKeys {
|
| 195 |
+
values := []value{}
|
| 196 |
+
for j := uint(0); j < 5; j++ {
|
| 197 |
+
values = append(values, value{value: []byte{uint8(key * j)}, tombstone: false})
|
| 198 |
+
}
|
| 199 |
+
treeCorrectOrder.insert([]byte{uint8(key)}, values)
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
flatten_tree_input := treeCorrectOrder.flattenInOrder()
|
| 203 |
+
for i := range flatten_tree {
|
| 204 |
+
byte_key := flatten_tree[i].key
|
| 205 |
+
originalIndex := getIndexInSlice(tt.keys, byte_key)
|
| 206 |
+
require.Equal(t, byte_key, flatten_tree_input[i].key)
|
| 207 |
+
require.Equal(t, flatten_tree[i].colourIsRed, tt.expectedColors[originalIndex])
|
| 208 |
+
}
|
| 209 |
+
})
|
| 210 |
+
}
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
// add keys as a) normal keys b) tombstone keys and c) half tombstone, half normal.
// The resulting (rebalanced) trees must have the same order and colors,
// i.e. balancing must be independent of whether an entry is a tombstone.
var tombstoneTests = []struct {
	name string
	keys []uint // insertion order; each key is truncated to a single byte
}{
	{"Rotate left around root", []uint{61, 83, 99}},
	{"Rotate right around root", []uint{61, 30, 10}},
	{"Multiple rotations along the tree and colour changes", []uint{166, 92, 33, 133, 227, 236, 71, 183, 18, 139, 245, 161}},
	{"Ordered nodes increasing", []uint{1, 2, 3, 4, 5, 6, 7, 8}},
	{"Ordered nodes decreasing", []uint{8, 7, 6, 5, 4, 3, 2, 1}},
}
|
| 225 |
+
|
| 226 |
+
func TestRBTrees_Tombstones(t *testing.T) {
|
| 227 |
+
for _, tt := range tombstoneTests {
|
| 228 |
+
t.Run(tt.name, func(t *testing.T) {
|
| 229 |
+
treeNormal := &binarySearchTree{}
|
| 230 |
+
treeTombstone := &binarySearchTree{}
|
| 231 |
+
treeHalfHalf := &binarySearchTree{}
|
| 232 |
+
for i, key := range tt.keys {
|
| 233 |
+
iByte := []byte{uint8(key)}
|
| 234 |
+
treeNormal.insert(iByte, iByte, nil)
|
| 235 |
+
treeTombstone.setTombstone(iByte, nil, nil)
|
| 236 |
+
if i%2 == 0 {
|
| 237 |
+
treeHalfHalf.insert(iByte, iByte, nil)
|
| 238 |
+
} else {
|
| 239 |
+
treeHalfHalf.setTombstone(iByte, nil, nil)
|
| 240 |
+
}
|
| 241 |
+
}
|
| 242 |
+
validateRBTree(t, treeNormal.root)
|
| 243 |
+
validateRBTree(t, treeTombstone.root)
|
| 244 |
+
validateRBTree(t, treeHalfHalf.root)
|
| 245 |
+
|
| 246 |
+
treeNormalFlatten := treeNormal.flattenInOrder()
|
| 247 |
+
treeTombstoneFlatten := treeTombstone.flattenInOrder()
|
| 248 |
+
treeHalfHalfFlatten := treeHalfHalf.flattenInOrder()
|
| 249 |
+
require.Equal(t, len(tt.keys), len(treeNormalFlatten))
|
| 250 |
+
require.Equal(t, len(tt.keys), len(treeTombstoneFlatten))
|
| 251 |
+
require.Equal(t, len(tt.keys), len(treeHalfHalfFlatten))
|
| 252 |
+
|
| 253 |
+
for i := range treeNormalFlatten {
|
| 254 |
+
require.Equal(t, treeNormalFlatten[i].key, treeTombstoneFlatten[i].key)
|
| 255 |
+
require.Equal(t, treeNormalFlatten[i].key, treeHalfHalfFlatten[i].key)
|
| 256 |
+
require.Equal(t, treeNormalFlatten[i].colourIsRed, treeTombstoneFlatten[i].colourIsRed)
|
| 257 |
+
require.Equal(t, treeNormalFlatten[i].colourIsRed, treeHalfHalfFlatten[i].colourIsRed)
|
| 258 |
+
}
|
| 259 |
+
})
|
| 260 |
+
}
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
// void is a zero-byte placeholder so that map[string]void can serve as a
// memory-efficient string set.
type void struct{}

// member is the single value stored for every set entry.
var member void
|
| 266 |
+
|
| 267 |
+
// mustRandIntn returns a cryptographically random int in [0, max). It panics
// if the randomness source fails, which is acceptable in test-only code.
func mustRandIntn(max int64) int {
	upper := big.NewInt(max)
	result, err := rand.Int(rand.Reader, upper)
	if err != nil {
		panic(fmt.Sprintf("mustRandIntn error: %v", err))
	}
	return int(result.Int64())
}
|
| 274 |
+
|
| 275 |
+
func TestRBTrees_Random(t *testing.T) {
|
| 276 |
+
tree := &binarySearchTree{}
|
| 277 |
+
amount := mustRandIntn(100000)
|
| 278 |
+
keySize := mustRandIntn(100)
|
| 279 |
+
uniqueKeys := make(map[string]void)
|
| 280 |
+
for i := 0; i < amount; i++ {
|
| 281 |
+
key := make([]byte, keySize)
|
| 282 |
+
rand.Read(key)
|
| 283 |
+
uniqueKeys[string(key)] = member
|
| 284 |
+
if mustRandIntn(5) == 1 { // add 20% of all entries as tombstone
|
| 285 |
+
tree.setTombstone(key, nil, nil)
|
| 286 |
+
} else {
|
| 287 |
+
tree.insert(key, key, nil)
|
| 288 |
+
}
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
// all added keys are still part of the tree
|
| 292 |
+
treeFlattened := tree.flattenInOrder()
|
| 293 |
+
require.Equal(t, len(uniqueKeys), len(treeFlattened))
|
| 294 |
+
for _, entry := range treeFlattened {
|
| 295 |
+
_, ok := uniqueKeys[string(entry.key)]
|
| 296 |
+
require.True(t, ok)
|
| 297 |
+
}
|
| 298 |
+
validateRBTree(t, tree.root)
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
func TestRBTreesMap_Random(t *testing.T) {
|
| 302 |
+
tree := &binarySearchTreeMap{}
|
| 303 |
+
amount := mustRandIntn(100000)
|
| 304 |
+
keySize := mustRandIntn(100)
|
| 305 |
+
uniqueKeys := make(map[string]void)
|
| 306 |
+
for i := 0; i < amount; i++ {
|
| 307 |
+
key := make([]byte, keySize)
|
| 308 |
+
rand.Read(key)
|
| 309 |
+
uniqueKeys[string(key)] = member
|
| 310 |
+
tree.insert(key, MapPair{
|
| 311 |
+
Key: []byte("map-key-1"),
|
| 312 |
+
Value: []byte("map-value-1"),
|
| 313 |
+
})
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
// all added keys are still part of the tree
|
| 317 |
+
treeFlattened := tree.flattenInOrder()
|
| 318 |
+
require.Equal(t, len(uniqueKeys), len(treeFlattened))
|
| 319 |
+
for _, entry := range treeFlattened {
|
| 320 |
+
_, ok := uniqueKeys[string(entry.key)]
|
| 321 |
+
require.True(t, ok)
|
| 322 |
+
}
|
| 323 |
+
validateRBTree(t, tree.root)
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
func TestRBTreesMulti_Random(t *testing.T) {
|
| 327 |
+
tree := &binarySearchTreeMulti{}
|
| 328 |
+
amount := mustRandIntn(100000)
|
| 329 |
+
keySize := mustRandIntn(100)
|
| 330 |
+
uniqueKeys := make(map[string]void)
|
| 331 |
+
for i := 0; i < amount; i++ {
|
| 332 |
+
key := make([]byte, keySize)
|
| 333 |
+
rand.Read(key)
|
| 334 |
+
uniqueKeys[string(key)] = member
|
| 335 |
+
values := []value{}
|
| 336 |
+
for j := 0; j < 5; j++ {
|
| 337 |
+
values = append(values, value{value: []byte{uint8(i * j)}, tombstone: false})
|
| 338 |
+
}
|
| 339 |
+
tree.insert(key, values)
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
// all added keys are still part of the tree
|
| 343 |
+
treeFlattened := tree.flattenInOrder()
|
| 344 |
+
require.Equal(t, len(uniqueKeys), len(treeFlattened))
|
| 345 |
+
for _, entry := range treeFlattened {
|
| 346 |
+
_, ok := uniqueKeys[string(entry.key)]
|
| 347 |
+
require.True(t, ok)
|
| 348 |
+
}
|
| 349 |
+
validateRBTree(t, tree.root)
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
// getIndexInSlice returns the position within reorderedKeys of the value
// matching the key's first byte, or -1 when it is not present.
// Note: only key[0] is compared, matching the single-byte keys used by the
// table-driven tests above.
func getIndexInSlice(reorderedKeys []uint, key []byte) int {
	target := uint(key[0])
	for idx, candidate := range reorderedKeys {
		if candidate == target {
			return idx
		}
	}
	return -1
}
|
| 360 |
+
|
| 361 |
+
// Checks if a tree is a RB tree
|
| 362 |
+
//
|
| 363 |
+
// There are several properties that valid RB trees follow:
|
| 364 |
+
// 1) The root node is always black
|
| 365 |
+
// 2) The max depth of a tree is 2* Log2(N+1), where N is the number of nodes
|
| 366 |
+
// 3) Every path from root to leave has the same number of _black_ nodes
|
| 367 |
+
// 4) Red nodes only have black (or nil) children
|
| 368 |
+
//
|
| 369 |
+
// In addition this also validates some general tree properties:
|
| 370 |
+
// - root has no parent
|
| 371 |
+
// - if node A is a child of B, B must be the parent of A)
|
| 372 |
+
func validateRBTree(t *testing.T, rootNode rbtree.Node) {
|
| 373 |
+
require.False(t, rootNode.IsRed())
|
| 374 |
+
require.True(t, rootNode.Parent().IsNil())
|
| 375 |
+
|
| 376 |
+
treeDepth, nodeCount, _ := walkTree(t, rootNode)
|
| 377 |
+
maxDepth := 2 * math.Log2(float64(nodeCount)+1)
|
| 378 |
+
require.True(t, treeDepth <= int(maxDepth))
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
// Walks through the tree and counts the depth, number of nodes and number of black nodes
|
| 382 |
+
func walkTree(t *testing.T, node rbtree.Node) (int, int, int) {
|
| 383 |
+
if reflect.ValueOf(node).IsNil() {
|
| 384 |
+
return 0, 0, 0
|
| 385 |
+
}
|
| 386 |
+
leftNode := node.Left()
|
| 387 |
+
leftNodeIsNil := reflect.ValueOf(leftNode).IsNil()
|
| 388 |
+
rightNode := node.Right()
|
| 389 |
+
rightNodeIsNil := reflect.ValueOf(rightNode).IsNil()
|
| 390 |
+
|
| 391 |
+
// validate parent/child connections
|
| 392 |
+
if !rightNodeIsNil {
|
| 393 |
+
require.Equal(t, rightNode.Parent(), node)
|
| 394 |
+
}
|
| 395 |
+
if !leftNodeIsNil {
|
| 396 |
+
require.Equal(t, leftNode.Parent(), node)
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
// red nodes need black (or nil) children
|
| 400 |
+
if node.IsRed() {
|
| 401 |
+
require.True(t, leftNodeIsNil || !node.Left().IsRed())
|
| 402 |
+
require.True(t, rightNodeIsNil || !node.Left().IsRed())
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
blackNode := int(1)
|
| 406 |
+
if node.IsRed() {
|
| 407 |
+
blackNode = 0
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
if node.Right().IsNil() && node.Left().IsNil() {
|
| 411 |
+
return 1, 1, blackNode
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
depthRight, nodeCountRight, blackNodesDepthRight := walkTree(t, node.Right())
|
| 415 |
+
depthLeft, nodeCountLeft, blackNodesDepthLeft := walkTree(t, node.Left())
|
| 416 |
+
require.Equal(t, blackNodesDepthRight, blackNodesDepthLeft)
|
| 417 |
+
|
| 418 |
+
nodeCount := nodeCountLeft + nodeCountRight + 1
|
| 419 |
+
if depthRight > depthLeft {
|
| 420 |
+
return depthRight + 1, nodeCount, blackNodesDepthRight + blackNode
|
| 421 |
+
} else {
|
| 422 |
+
return depthLeft + 1, nodeCount, blackNodesDepthRight + blackNode
|
| 423 |
+
}
|
| 424 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/search_segment.go
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"context"
|
| 16 |
+
"fmt"
|
| 17 |
+
"math"
|
| 18 |
+
"sort"
|
| 19 |
+
"strconv"
|
| 20 |
+
|
| 21 |
+
"github.com/sirupsen/logrus"
|
| 22 |
+
"github.com/weaviate/weaviate/adapters/repos/db/inverted/terms"
|
| 23 |
+
"github.com/weaviate/weaviate/adapters/repos/db/priorityqueue"
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
// DoBlockMaxWand runs the Block-Max WAND top-k scoring algorithm over the
// given per-term posting iterators and returns a min-heap holding the best
// `limit` (docID, score) pairs.
//
//   - results: one SegmentBlockMax iterator per query term
//   - averagePropLength: fed into each term's Score computation
//   - additionalExplanations: if set, per-term DocPointerWithScore entries
//     are collected for each scored document
//   - termCount: sizes the explanation slice (indexed by QueryTermIndex)
//   - minimumOrTokensMatch: documents matching fewer terms are not enqueued
//
// Cancellation is only checked every 100k iterations; on cancellation the
// partial heap is returned together with an error.
func DoBlockMaxWand(ctx context.Context, limit int, results Terms, averagePropLength float64, additionalExplanations bool,
	termCount, minimumOrTokensMatch int, logger logrus.FieldLogger,
) (*priorityqueue.Queue[[]*terms.DocPointerWithScore], error) {
	var docInfos []*terms.DocPointerWithScore
	topKHeap := priorityqueue.NewMinWithId[[]*terms.DocPointerWithScore](limit)
	worstDist := float64(-10000) // tf score can be negative
	// invariant maintained throughout: results is ordered by current doc id
	sort.Sort(results)
	iterations := 0
	var firstNonExhausted int
	pivotID := uint64(0)
	var pivotPoint int
	upperBound := float32(0)

	for {
		iterations++

		// periodic timeout check; on cancellation log diagnostic state and
		// return whatever has been collected so far
		if iterations%100000 == 0 && ctx != nil && ctx.Err() != nil {
			segmentPath := ""
			// NOTE(review): this local shadows the imported `terms` package;
			// harmless here since the package is not used in this branch
			terms := ""
			filterCardinality := -1
			for _, r := range results {
				if r == nil {
					continue
				}
				if r.segment != nil {
					segmentPath = r.segment.path
					if r.filterDocIds != nil {
						filterCardinality = r.filterDocIds.GetCardinality()
					}
				}
				terms += r.QueryTerm() + ":" + strconv.Itoa(int(r.IdPointer())) + ":" + strconv.Itoa(r.Count()) + ", "
			}
			logger.WithFields(logrus.Fields{
				"segment":           segmentPath,
				"iterations":        iterations,
				"pivotID":           pivotID,
				"firstNonExhausted": firstNonExhausted,
				"lenResults":        len(results),
				"pivotPoint":        pivotPoint,
				"upperBound":        upperBound,
				"terms":             terms,
				"filterCardinality": filterCardinality,
				"limit":             limit,
			}).Warnf("DoBlockMaxWand: search timed out, returning partial results")
			return topKHeap, fmt.Errorf("DoBlockMaxWand: search timed out, returning partial results")
		}

		cumScore := float64(0)
		firstNonExhausted = -1
		pivotID = math.MaxUint64

		// pivot selection: walk the id-sorted terms accumulating idf upper
		// bounds until they could beat the current worst heap entry; the doc
		// id at that position becomes the pivot
		for pivotPoint = 0; pivotPoint < len(results); pivotPoint++ {
			if results[pivotPoint].exhausted {
				continue
			}
			if firstNonExhausted == -1 {
				firstNonExhausted = pivotPoint
			}
			cumScore += float64(results[pivotPoint].Idf())
			if cumScore >= worstDist {
				pivotID = results[pivotPoint].idPointer
				// extend pivotPoint over any further terms already sitting
				// on the same doc id
				for i := pivotPoint + 1; i < len(results); i++ {
					if results[i].idPointer != pivotID {
						break
					}
					pivotPoint = i
				}
				break
			}
		}
		// no pivot found: either all terms are exhausted or no remaining doc
		// can beat the heap — search is done
		if firstNonExhausted == -1 || pivotID == math.MaxUint64 {
			return topKHeap, nil
		}

		// block-max refinement: move each contributing term's block cursor
		// (shallow, no posting decode) and sum the per-block score caps
		upperBound = float32(0)
		for i := 0; i <= pivotPoint; i++ {
			if results[i].exhausted {
				continue
			}
			if results[i].currentBlockMaxId < pivotID {
				results[i].AdvanceAtLeastShallow(pivotID)
			}
			upperBound += results[i].currentBlockImpact
		}

		if topKHeap.ShouldEnqueue(upperBound, limit) {
			if additionalExplanations {
				docInfos = make([]*terms.DocPointerWithScore, termCount)
			}
			if pivotID == results[firstNonExhausted].idPointer {
				// all terms up to the pivot sit on the pivot doc: fully
				// score it
				score := 0.0
				termsMatched := 0
				for _, term := range results {
					if term.idPointer != pivotID {
						break
					}
					termsMatched++
					_, s, d := term.Score(averagePropLength, additionalExplanations)
					score += s
					upperBound -= term.currentBlockImpact - float32(s)

					if additionalExplanations {
						docInfos[term.QueryTermIndex()] = d
					}

				}
				// advance every term that was sitting on the pivot doc
				for _, term := range results {
					if !term.exhausted && term.idPointer != pivotID {
						break
					}
					term.Advance()
				}
				if topKHeap.ShouldEnqueue(float32(score), limit) && termsMatched >= minimumOrTokensMatch {
					topKHeap.InsertAndPop(pivotID, score, limit, &worstDist, docInfos)
				}

				// restore the id-sorted invariant after the advances
				sort.Sort(results)

			} else {
				// some leading term lags behind the pivot: advance the last
				// term before the pivot group up to the pivot id
				nextList := pivotPoint
				for results[nextList].idPointer == pivotID {
					nextList--
				}
				results[nextList].AdvanceAtLeast(pivotID)

				// sort partial
				for i := nextList + 1; i < len(results); i++ {
					if results[i].idPointer < results[i-1].idPointer {
						// swap
						results[i], results[i-1] = results[i-1], results[i]
					} else {
						break
					}
				}

			}
		} else {
			// block upper bound too small: skip ahead. Advance the term with
			// the largest idf among the candidates
			nextList := pivotPoint
			maxWeight := results[nextList].Idf()

			for i := 0; i < pivotPoint; i++ {
				if results[i].Idf() > maxWeight {
					nextList = i
					maxWeight = results[i].Idf()
				}
			}

			// max uint
			next := uint64(math.MaxUint64)

			// jump target: just past the smallest current block boundary
			for i := 0; i <= pivotPoint; i++ {
				if results[i].currentBlockMaxId < next {
					next = results[i].currentBlockMaxId
				}
			}

			next += 1

			// but never beyond the next term after the pivot group
			if pivotPoint+1 < len(results) && results[pivotPoint+1].idPointer < next {
				next = results[pivotPoint+1].idPointer
			}

			// always make forward progress past the current pivot
			if next <= pivotID {
				next = pivotID + 1
			}
			results[nextList].AdvanceAtLeast(next)

			// partial re-sort, bubbling the advanced/exhausted term back
			// into position
			for i := nextList + 1; i < len(results); i++ {
				if results[i].idPointer < results[i-1].idPointer {
					// swap
					results[i], results[i-1] = results[i-1], results[i]
				} else if results[i].exhausted && i < len(results)-1 {
					results[i], results[i+1] = results[i+1], results[i]
				}
			}

		}

	}
}
|
| 206 |
+
|
| 207 |
+
// DoBlockMaxAnd scores documents that contain ALL query terms (conjunctive
// retrieval) using block-max skipping, and returns a min-heap holding the
// best `limit` (docID, score) pairs.
//
// Terms are processed in ascending posting-list size (TermsBySize) so that
// the rarest term drives iteration. As soon as any term is exhausted, no
// further document can match all terms and the search stops.
//
// Cancellation is only checked every 100k iterations; on cancellation the
// partial heap is returned (no error, unlike DoBlockMaxWand).
func DoBlockMaxAnd(ctx context.Context, limit int, resultsByTerm Terms, averagePropLength float64, additionalExplanations bool,
	termCount int, minimumOrTokensMatch int, logger logrus.FieldLogger,
) *priorityqueue.Queue[[]*terms.DocPointerWithScore] {
	results := TermsBySize(resultsByTerm)
	var docInfos []*terms.DocPointerWithScore
	topKHeap := priorityqueue.NewMinWithId[[]*terms.DocPointerWithScore](limit)
	worstDist := float64(-10000) // tf score can be negative
	// order by posting-list size: the smallest list leads the intersection
	sort.Sort(results)
	iterations := 0
	pivotID := uint64(0)
	upperBound := float32(0)

	// an AND over len(results) terms can match at most len(results) tokens
	if minimumOrTokensMatch > len(results) {
		return topKHeap
	}

	for {
		iterations++

		// periodic timeout check; on cancellation log diagnostic state and
		// return whatever has been collected so far
		if iterations%100000 == 0 && ctx != nil && ctx.Err() != nil {
			segmentPath := ""
			// NOTE(review): this local shadows the imported `terms` package;
			// harmless here since the package is not used in this branch
			terms := ""
			filterCardinality := -1
			for _, r := range results {
				if r == nil {
					continue
				}
				if r.segment != nil {
					segmentPath = r.segment.path
					if r.filterDocIds != nil {
						filterCardinality = r.filterDocIds.GetCardinality()
					}
				}
				terms += r.QueryTerm() + ":" + strconv.Itoa(int(r.IdPointer())) + ":" + strconv.Itoa(r.Count()) + ", "
			}
			logger.WithFields(logrus.Fields{
				"segment":           segmentPath,
				"iterations":        iterations,
				"pivotID":           pivotID,
				"lenResults":        len(results),
				"upperBound":        upperBound,
				"terms":             terms,
				"filterCardinality": filterCardinality,
				"limit":             limit,
			}).Warnf("DoBlockMaxAnd: search timed out, returning partial results")
			return topKHeap
		}

		// conjunction: one exhausted term means no more matches are possible
		for i := 0; i < len(results); i++ {
			if results[i].exhausted {
				return topKHeap
			}
		}

		// the smallest posting list drives the candidate doc id
		results[0].AdvanceAtLeast(pivotID)

		if results[0].idPointer == math.MaxUint64 {
			return topKHeap
		}

		pivotID = results[0].idPointer

		// shallow-advance the remaining terms' block cursors to the
		// candidate (no posting decode yet)
		for i := 1; i < len(results); i++ {
			results[i].AdvanceAtLeastShallow(pivotID)
		}

		// sum of per-block score caps — an upper bound on the candidate's
		// total score
		upperBound = float32(0)
		for i := 0; i < len(results); i++ {
			upperBound += results[i].currentBlockImpact
		}

		if topKHeap.ShouldEnqueue(upperBound, limit) {
			// candidate could make the heap: verify every term actually
			// contains the candidate doc
			isCandidate := true
			for i := 1; i < len(results); i++ {
				results[i].AdvanceAtLeast(pivotID)
				if results[i].idPointer != pivotID {
					isCandidate = false
					break
				}
			}
			if isCandidate {
				score := 0.0
				if additionalExplanations {
					docInfos = make([]*terms.DocPointerWithScore, termCount)
				}
				for _, term := range results {
					_, s, d := term.Score(averagePropLength, additionalExplanations)
					score += s
					if additionalExplanations {
						docInfos[term.QueryTermIndex()] = d
					}
					term.Advance()
				}
				if topKHeap.ShouldEnqueue(float32(score), limit) {
					topKHeap.InsertAndPop(pivotID, score, limit, &worstDist, docInfos)
				}
			} else {
				// some term lacks the candidate: try the next doc id
				pivotID += 1
			}
		} else {

			// max uint
			pivotID = uint64(math.MaxUint64)

			// upper bound too small for the whole block: skip past the
			// smallest current block boundary
			for i := 0; i < len(results); i++ {
				if results[i].currentBlockMaxId < pivotID {
					pivotID = results[i].currentBlockMaxId
				}
			}

			pivotID += 1
		}
	}
}
|
| 321 |
+
|
| 322 |
+
func DoWand(limit int, results *terms.Terms, averagePropLength float64, additionalExplanations bool,
|
| 323 |
+
minimumOrTokensMatch int,
|
| 324 |
+
) *priorityqueue.Queue[[]*terms.DocPointerWithScore] {
|
| 325 |
+
topKHeap := priorityqueue.NewMinWithId[[]*terms.DocPointerWithScore](limit)
|
| 326 |
+
worstDist := float64(-10000) // tf score can be negative
|
| 327 |
+
sort.Sort(results)
|
| 328 |
+
for {
|
| 329 |
+
|
| 330 |
+
if results.CompletelyExhausted() || results.Pivot(worstDist) {
|
| 331 |
+
return topKHeap
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
id, score, additional, ok := results.ScoreNext(averagePropLength, additionalExplanations, minimumOrTokensMatch)
|
| 335 |
+
results.SortFull()
|
| 336 |
+
if topKHeap.ShouldEnqueue(float32(score), limit) && ok {
|
| 337 |
+
topKHeap.InsertAndPop(id, score, limit, &worstDist, additional)
|
| 338 |
+
}
|
| 339 |
+
}
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
// Terms is a collection of per-query-term SegmentBlockMax iterators.
type Terms []*SegmentBlockMax

// provide sort interface, ordering terms by the doc id each iterator
// currently points at (the order WAND-style pivot selection relies on)
func (t Terms) Len() int {
	return len(t)
}

func (t Terms) Less(i, j int) bool {
	return t[i].idPointer < t[j].idPointer
}

func (t Terms) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}
|
| 356 |
+
|
| 357 |
+
// TermsBySize is a collection of per-query-term SegmentBlockMax iterators
// sortable by posting-list size, as used by DoBlockMaxAnd to let the rarest
// term drive the intersection.
type TermsBySize []*SegmentBlockMax

// provide sort interface, ordering terms by ascending document count
func (t TermsBySize) Len() int {
	return len(t)
}

func (t TermsBySize) Less(i, j int) bool {
	return t[i].Count() < t[j].Count()
}

func (t TermsBySize) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment.go
ADDED
|
@@ -0,0 +1,683 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"bufio"
|
| 16 |
+
"bytes"
|
| 17 |
+
"fmt"
|
| 18 |
+
"io"
|
| 19 |
+
"os"
|
| 20 |
+
"sync"
|
| 21 |
+
|
| 22 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 23 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 24 |
+
|
| 25 |
+
"github.com/pkg/errors"
|
| 26 |
+
|
| 27 |
+
"github.com/bits-and-blooms/bloom/v3"
|
| 28 |
+
"github.com/prometheus/client_golang/prometheus"
|
| 29 |
+
"github.com/sirupsen/logrus"
|
| 30 |
+
"github.com/weaviate/sroar"
|
| 31 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 32 |
+
"github.com/weaviate/weaviate/entities/diskio"
|
| 33 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 34 |
+
entsentry "github.com/weaviate/weaviate/entities/sentry"
|
| 35 |
+
"github.com/weaviate/weaviate/usecases/memwatch"
|
| 36 |
+
"github.com/weaviate/weaviate/usecases/mmap"
|
| 37 |
+
"github.com/weaviate/weaviate/usecases/monitoring"
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
// Segment is the read-only contract of a single on-disk LSM segment.
// *segment (this file) implements it directly; other implementations
// (e.g. a lazily loaded variant) may defer work until first access.
type Segment interface {
	// basic attributes read from the segment header / file
	getPath() string
	setPath(path string)
	getStrategy() segmentindex.Strategy
	getSecondaryIndexCount() uint16
	getLevel() uint16
	getSize() int64
	setSize(size int64)

	PayloadSize() int
	close() error
	get(key []byte) ([]byte, error)
	getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error)
	getCollection(key []byte) ([]value, error)
	getInvertedData() *segmentInvertedData
	getSegment() *segment
	isLoaded() bool
	markForDeletion() error
	MergeTombstones(other *sroar.Bitmap) (*sroar.Bitmap, error)
	// cursor constructors, one per storage strategy
	newCollectionCursor() *segmentCursorCollection
	newCollectionCursorReusable() *segmentCursorCollectionReusable
	newCursor() *segmentCursorReplace
	newCursorWithSecondaryIndex(pos int) *segmentCursorReplace
	newMapCursor() *segmentCursorMap
	newNodeReader(offset nodeOffset, operation string) (*nodeReader, error)
	newRoaringSetCursor() *roaringset.SegmentCursor
	newRoaringSetRangeCursor() roaringsetrange.SegmentCursor
	newRoaringSetRangeReader() *roaringsetrange.SegmentReader
	quantileKeys(q int) [][]byte
	ReadOnlyTombstones() (*sroar.Bitmap, error)
	replaceStratParseData(in []byte) ([]byte, []byte, error)
	roaringSetGet(key []byte, bitmapBufPool roaringset.BitmapBufPool) (roaringset.BitmapLayer, func(), error)
	roaringSetMergeWith(key []byte, input roaringset.BitmapLayer, bitmapBufPool roaringset.BitmapBufPool) error
}
|
| 74 |
+
|
| 75 |
+
// segment represents one immutable on-disk LSM segment file, accessed either
// via mmap or from an in-memory copy of its contents (see readFromMemory).
type segment struct {
	path                string // current on-disk location of the segment file
	metaPaths           []string
	level               uint16 // compaction level from the segment header
	secondaryIndexCount uint16
	version             uint16 // segment file format version
	segmentStartPos     uint64 // start of the primary index (header.IndexStart)
	segmentEndPos       uint64 // total file size in bytes
	dataStartPos        uint64 // first byte of payload data
	dataEndPos          uint64 // one past the last byte of payload data
	contents            []byte   // mmapped or fully read file contents
	contentFile         *os.File // kept open only for pread-style access (readFromMemory == false)
	strategy            segmentindex.Strategy
	index               diskIndex // primary key index
	secondaryIndices    []diskIndex
	logger              logrus.FieldLogger
	metrics             *Metrics
	size                int64
	readFromMemory      bool // serve reads from `contents` instead of the file
	unMapContents       bool // contents is an mmap that must be unmapped on close

	useBloomFilter        bool // see bucket for more details
	bloomFilter           *bloom.BloomFilter
	secondaryBloomFilters []*bloom.BloomFilter
	bloomFilterMetrics    *bloomFilterMetrics

	// the net addition this segment adds with respect to all previous segments
	calcCountNetAdditions bool // see bucket for more details
	countNetAdditions     int

	invertedHeader *segmentindex.HeaderInverted // only set for StrategyInverted segments
	invertedData   *segmentInvertedData

	observeMetaWrite diskio.MeteredWriterCallback // used for precomputing meta (cna + bloom)
}
|
| 110 |
+
|
| 111 |
+
// diskIndex is the read-only key index stored inside a segment file
// (implemented by segmentindex.DiskTree; see newSegment).
type diskIndex interface {
	// Get return lsmkv.NotFound in case no node can be found
	Get(key []byte) (segmentindex.Node, error)

	// Seek returns lsmkv.NotFound in case the seek value is larger than
	// the highest value in the collection, otherwise it returns the next highest
	// value (or the exact value if present)
	Seek(key []byte) (segmentindex.Node, error)

	// Next returns the node following the given key.
	// NOTE(review): exact not-found semantics are defined by the implementation.
	Next(key []byte) (segmentindex.Node, error)

	// AllKeys in no specific order, e.g. for building a bloom filter
	AllKeys() ([][]byte, error)

	// Size of the index in bytes
	Size() int

	// QuantileKeys returns up to q keys spread across the key space
	// (NOTE(review): semantics per implementation).
	QuantileKeys(q int) [][]byte
}
|
| 130 |
+
|
| 131 |
+
// segmentConfig bundles the options newSegment needs to decide how to load a
// segment (mmap vs. full in-memory read), which derived artifacts to build,
// and which precomputed values it may reuse.
type segmentConfig struct {
	mmapContents             bool // serve reads from the (mmapped) contents slice rather than via pread
	useBloomFilter           bool // see bucket for more details
	calcCountNetAdditions    bool // see bucket for more details
	overwriteDerived         bool // presumably: rebuild derived files (bloom/cna) even if present — confirm against callers
	enableChecksumValidation bool // validate the segment checksum on open (SegmentV1+)
	MinMMapSize              int64 // files at or below this size are candidates for a full in-memory read
	allocChecker             memwatch.AllocChecker // guards full in-memory reads against memory pressure
	fileList                 map[string]int64 // optional prefetched file sizes, keyed by file name
	precomputedCountNetAdditions *int // if set, skips recomputing the count net additions
	writeMetadata            bool
}
|
| 143 |
+
|
| 144 |
+
// newSegment creates a new segment structure, representing an LSM disk segment.
|
| 145 |
+
//
|
| 146 |
+
// This function is partially copied by a function called preComputeSegmentMeta.
|
| 147 |
+
// Any changes made here should likely be made in preComputeSegmentMeta as well,
|
| 148 |
+
// and vice versa. This is absolutely not ideal, but in the short time I was able
|
| 149 |
+
// to consider this, I wasn't able to find a way to unify the two -- there are
|
| 150 |
+
// subtle differences.
|
| 151 |
+
func newSegment(path string, logger logrus.FieldLogger, metrics *Metrics,
|
| 152 |
+
existsLower existsOnLowerSegmentsFn, cfg segmentConfig,
|
| 153 |
+
) (_ *segment, rerr error) {
|
| 154 |
+
defer func() {
|
| 155 |
+
p := recover()
|
| 156 |
+
if p == nil {
|
| 157 |
+
return
|
| 158 |
+
}
|
| 159 |
+
entsentry.Recover(p)
|
| 160 |
+
rerr = fmt.Errorf("unexpected error loading segment %q: %v", path, p)
|
| 161 |
+
}()
|
| 162 |
+
|
| 163 |
+
file, err := os.Open(path)
|
| 164 |
+
if err != nil {
|
| 165 |
+
return nil, fmt.Errorf("open file: %w", err)
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
// The lifetime of the `file` exceeds this constructor as we store the open file for later use in `contentFile`.
|
| 169 |
+
// invariant: We close **only** if any error happened after successfully opening the file. To avoid leaking open file descriptor.
|
| 170 |
+
// NOTE: This `defer` works even with `err` being shadowed in the whole function because defer checks for named `rerr` return value.
|
| 171 |
+
defer func() {
|
| 172 |
+
if rerr != nil {
|
| 173 |
+
file.Close()
|
| 174 |
+
}
|
| 175 |
+
}()
|
| 176 |
+
|
| 177 |
+
var size int64
|
| 178 |
+
if cfg.fileList != nil {
|
| 179 |
+
if fileSize, ok := cfg.fileList[file.Name()]; ok {
|
| 180 |
+
size = fileSize
|
| 181 |
+
}
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
// fallback to getting the filesize from disk in case it wasn't prefetched (for example, for new segments after compaction)
|
| 185 |
+
if size == 0 {
|
| 186 |
+
fileInfo, err := file.Stat()
|
| 187 |
+
if err != nil {
|
| 188 |
+
return nil, fmt.Errorf("stat file: %w", err)
|
| 189 |
+
}
|
| 190 |
+
size = fileInfo.Size()
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
// mmap has some overhead, we can read small files directly to memory
|
| 194 |
+
var contents []byte
|
| 195 |
+
var unMapContents bool
|
| 196 |
+
var allocCheckerErr error
|
| 197 |
+
|
| 198 |
+
if size <= cfg.MinMMapSize { // check if it is a candidate for full reading
|
| 199 |
+
if cfg.allocChecker == nil {
|
| 200 |
+
logger.WithFields(logrus.Fields{
|
| 201 |
+
"path": path,
|
| 202 |
+
"size": size,
|
| 203 |
+
"minMMapSize": cfg.MinMMapSize,
|
| 204 |
+
}).Info("allocChecker is nil, skipping memory pressure check for new segment")
|
| 205 |
+
} else {
|
| 206 |
+
allocCheckerErr = cfg.allocChecker.CheckAlloc(size) // check if we have enough memory
|
| 207 |
+
if allocCheckerErr != nil {
|
| 208 |
+
logger.Debugf("memory pressure: cannot fully read segment")
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
useBloomFilter := cfg.useBloomFilter
|
| 214 |
+
readFromMemory := cfg.mmapContents
|
| 215 |
+
if size > cfg.MinMMapSize || cfg.allocChecker == nil || allocCheckerErr != nil { // mmap the file if it's too large or if we have memory pressure
|
| 216 |
+
contents2, err := mmap.MapRegion(file, int(size), mmap.RDONLY, 0, 0)
|
| 217 |
+
if err != nil {
|
| 218 |
+
return nil, fmt.Errorf("mmap file: %w", err)
|
| 219 |
+
}
|
| 220 |
+
contents = contents2
|
| 221 |
+
unMapContents = true
|
| 222 |
+
} else { // read the file into memory if it's small enough and we have enough memory
|
| 223 |
+
meteredF := diskio.NewMeteredReader(file, diskio.MeteredReaderCallback(metrics.ReadObserver("readSegmentFile")))
|
| 224 |
+
bufio.NewReader(meteredF)
|
| 225 |
+
contents, err = io.ReadAll(meteredF)
|
| 226 |
+
if err != nil {
|
| 227 |
+
return nil, fmt.Errorf("read file: %w", err)
|
| 228 |
+
}
|
| 229 |
+
unMapContents = false
|
| 230 |
+
readFromMemory = true
|
| 231 |
+
useBloomFilter = false
|
| 232 |
+
}
|
| 233 |
+
header, err := segmentindex.ParseHeader(contents[:segmentindex.HeaderSize])
|
| 234 |
+
if err != nil {
|
| 235 |
+
return nil, fmt.Errorf("parse header: %w", err)
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
if err := segmentindex.CheckExpectedStrategy(header.Strategy); err != nil {
|
| 239 |
+
return nil, fmt.Errorf("unsupported strategy in segment: %w", err)
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
if header.Version >= segmentindex.SegmentV1 && cfg.enableChecksumValidation {
|
| 243 |
+
file.Seek(0, io.SeekStart)
|
| 244 |
+
headerSize := int64(segmentindex.HeaderSize)
|
| 245 |
+
if header.Strategy == segmentindex.StrategyInverted {
|
| 246 |
+
headerSize += int64(segmentindex.HeaderInvertedSize)
|
| 247 |
+
}
|
| 248 |
+
segmentFile := segmentindex.NewSegmentFile(segmentindex.WithReader(file))
|
| 249 |
+
if err := segmentFile.ValidateChecksum(size, headerSize); err != nil {
|
| 250 |
+
return nil, fmt.Errorf("validate segment %q: %w", path, err)
|
| 251 |
+
}
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
primaryIndex, err := header.PrimaryIndex(contents)
|
| 255 |
+
if err != nil {
|
| 256 |
+
return nil, fmt.Errorf("extract primary index position: %w", err)
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
// if there are no secondary indices and checksum validation is enabled,
|
| 260 |
+
// we need to remove the checksum bytes from the primary index
|
| 261 |
+
// See below for the same logic if there are secondary indices
|
| 262 |
+
if header.Version >= segmentindex.SegmentV1 && cfg.enableChecksumValidation && header.SecondaryIndices == 0 {
|
| 263 |
+
primaryIndex = primaryIndex[:len(primaryIndex)-segmentindex.ChecksumSize]
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
primaryDiskIndex := segmentindex.NewDiskTree(primaryIndex)
|
| 267 |
+
|
| 268 |
+
dataStartPos := uint64(segmentindex.HeaderSize)
|
| 269 |
+
dataEndPos := header.IndexStart
|
| 270 |
+
|
| 271 |
+
var invertedHeader *segmentindex.HeaderInverted
|
| 272 |
+
if header.Strategy == segmentindex.StrategyInverted {
|
| 273 |
+
invertedHeader, err = segmentindex.LoadHeaderInverted(contents[segmentindex.HeaderSize : segmentindex.HeaderSize+segmentindex.HeaderInvertedSize])
|
| 274 |
+
if err != nil {
|
| 275 |
+
return nil, errors.Wrap(err, "load inverted header")
|
| 276 |
+
}
|
| 277 |
+
dataStartPos = invertedHeader.KeysOffset
|
| 278 |
+
dataEndPos = invertedHeader.TombstoneOffset
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
stratLabel := header.Strategy.String()
|
| 282 |
+
observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{
|
| 283 |
+
"strategy": stratLabel,
|
| 284 |
+
"operation": "segmentMetadata",
|
| 285 |
+
})
|
| 286 |
+
|
| 287 |
+
if unMapContents {
|
| 288 |
+
// a map was created, track it
|
| 289 |
+
monitoring.GetMetrics().MmapOperations.With(prometheus.Labels{
|
| 290 |
+
"operation": "mmap",
|
| 291 |
+
"strategy": stratLabel,
|
| 292 |
+
}).Inc()
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
seg := &segment{
|
| 296 |
+
level: header.Level,
|
| 297 |
+
path: path,
|
| 298 |
+
contents: contents,
|
| 299 |
+
version: header.Version,
|
| 300 |
+
secondaryIndexCount: header.SecondaryIndices,
|
| 301 |
+
segmentStartPos: header.IndexStart,
|
| 302 |
+
segmentEndPos: uint64(size),
|
| 303 |
+
strategy: header.Strategy,
|
| 304 |
+
dataStartPos: dataStartPos,
|
| 305 |
+
dataEndPos: dataEndPos,
|
| 306 |
+
index: primaryDiskIndex,
|
| 307 |
+
logger: logger,
|
| 308 |
+
metrics: metrics,
|
| 309 |
+
size: size,
|
| 310 |
+
readFromMemory: readFromMemory,
|
| 311 |
+
useBloomFilter: useBloomFilter,
|
| 312 |
+
calcCountNetAdditions: cfg.calcCountNetAdditions,
|
| 313 |
+
invertedHeader: invertedHeader,
|
| 314 |
+
invertedData: &segmentInvertedData{
|
| 315 |
+
tombstones: sroar.NewBitmap(),
|
| 316 |
+
},
|
| 317 |
+
unMapContents: unMapContents,
|
| 318 |
+
observeMetaWrite: func(n int64) { observeWrite.Observe(float64(n)) },
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
// Using pread strategy requires file to remain open for segment lifetime
|
| 322 |
+
if seg.readFromMemory {
|
| 323 |
+
defer file.Close()
|
| 324 |
+
} else {
|
| 325 |
+
seg.contentFile = file
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
if seg.secondaryIndexCount > 0 {
|
| 329 |
+
seg.secondaryIndices = make([]diskIndex, seg.secondaryIndexCount)
|
| 330 |
+
for i := range seg.secondaryIndices {
|
| 331 |
+
secondary, err := header.SecondaryIndex(contents, uint16(i))
|
| 332 |
+
if err != nil {
|
| 333 |
+
return nil, fmt.Errorf("get position for secondary index at %d: %w", i, err)
|
| 334 |
+
}
|
| 335 |
+
// if we are on the last secondary index and checksum validation is enabled,
|
| 336 |
+
// we need to remove the checksum bytes from the secondary index
|
| 337 |
+
if header.Version >= segmentindex.SegmentV1 && cfg.enableChecksumValidation && i == int(seg.secondaryIndexCount-1) {
|
| 338 |
+
secondary = secondary[:len(secondary)-segmentindex.ChecksumSize]
|
| 339 |
+
}
|
| 340 |
+
seg.secondaryIndices[i] = segmentindex.NewDiskTree(secondary)
|
| 341 |
+
}
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
metadataRead, err := seg.initMetadata(metrics, cfg.overwriteDerived, existsLower, cfg.precomputedCountNetAdditions, cfg.fileList, cfg.writeMetadata)
|
| 345 |
+
if err != nil {
|
| 346 |
+
return nil, fmt.Errorf("init metadata: %w", err)
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
if !metadataRead {
|
| 350 |
+
if seg.useBloomFilter {
|
| 351 |
+
if err := seg.initBloomFilters(metrics, cfg.overwriteDerived, cfg.fileList); err != nil {
|
| 352 |
+
return nil, err
|
| 353 |
+
}
|
| 354 |
+
}
|
| 355 |
+
if seg.calcCountNetAdditions {
|
| 356 |
+
if err := seg.initCountNetAdditions(existsLower, cfg.overwriteDerived, cfg.precomputedCountNetAdditions, cfg.fileList); err != nil {
|
| 357 |
+
return nil, err
|
| 358 |
+
}
|
| 359 |
+
}
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
if seg.strategy == segmentindex.StrategyInverted {
|
| 363 |
+
_, err := seg.loadTombstones()
|
| 364 |
+
if err != nil {
|
| 365 |
+
return nil, fmt.Errorf("load tombstones: %w", err)
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
_, err = seg.loadPropertyLengths()
|
| 369 |
+
if err != nil {
|
| 370 |
+
return nil, fmt.Errorf("load property lengths: %w", err)
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
return seg, nil
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
func (s *segment) close() error {
|
| 379 |
+
var munmapErr, fileCloseErr error
|
| 380 |
+
if s.unMapContents {
|
| 381 |
+
m := mmap.MMap(s.contents)
|
| 382 |
+
munmapErr = m.Unmap()
|
| 383 |
+
stratLabel := s.strategy.String()
|
| 384 |
+
monitoring.GetMetrics().MmapOperations.With(prometheus.Labels{
|
| 385 |
+
"operation": "munmap",
|
| 386 |
+
"strategy": stratLabel,
|
| 387 |
+
}).Inc()
|
| 388 |
+
}
|
| 389 |
+
if s.contentFile != nil {
|
| 390 |
+
fileCloseErr = s.contentFile.Close()
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
if munmapErr != nil || fileCloseErr != nil {
|
| 394 |
+
return fmt.Errorf("close segment: munmap: %w, close contents file: %w", munmapErr, fileCloseErr)
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
return nil
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
func (s *segment) dropImmediately() error {
|
| 401 |
+
// support for persisting bloom filters and cnas was added in v1.17,
|
| 402 |
+
// therefore the files may not be present on segments created with previous
|
| 403 |
+
// versions. By using RemoveAll, which does not error on NotExists, these
|
| 404 |
+
// drop calls are backward-compatible:
|
| 405 |
+
if err := os.RemoveAll(s.bloomFilterPath()); err != nil {
|
| 406 |
+
return fmt.Errorf("drop bloom filter: %w", err)
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
for i := 0; i < int(s.secondaryIndexCount); i++ {
|
| 410 |
+
if err := os.RemoveAll(s.bloomFilterSecondaryPath(i)); err != nil {
|
| 411 |
+
return fmt.Errorf("drop bloom filter: %w", err)
|
| 412 |
+
}
|
| 413 |
+
}
|
| 414 |
+
|
| 415 |
+
if err := os.RemoveAll(s.countNetPath()); err != nil {
|
| 416 |
+
return fmt.Errorf("drop count net additions file: %w", err)
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
if err := os.RemoveAll(s.metadataPath()); err != nil {
|
| 420 |
+
return fmt.Errorf("drop metadata file: %w", err)
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
// for the segment itself, we're not using RemoveAll, but Remove. If there
|
| 424 |
+
// was a NotExists error here, something would be seriously wrong, and we
|
| 425 |
+
// don't want to ignore it.
|
| 426 |
+
if err := os.Remove(s.path); err != nil {
|
| 427 |
+
return fmt.Errorf("drop segment: %w", err)
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
return nil
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
func (s *segment) dropMarked() error {
|
| 434 |
+
// support for persisting bloom filters and cnas was added in v1.17,
|
| 435 |
+
// therefore the files may not be present on segments created with previous
|
| 436 |
+
// versions. By using RemoveAll, which does not error on NotExists, these
|
| 437 |
+
// drop calls are backward-compatible:
|
| 438 |
+
if err := os.RemoveAll(s.bloomFilterPath() + DeleteMarkerSuffix); err != nil {
|
| 439 |
+
return fmt.Errorf("drop previously marked bloom filter: %w", err)
|
| 440 |
+
}
|
| 441 |
+
|
| 442 |
+
for i := 0; i < int(s.secondaryIndexCount); i++ {
|
| 443 |
+
if err := os.RemoveAll(s.bloomFilterSecondaryPath(i) + DeleteMarkerSuffix); err != nil {
|
| 444 |
+
return fmt.Errorf("drop previously marked secondary bloom filter: %w", err)
|
| 445 |
+
}
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
if err := os.RemoveAll(s.countNetPath() + DeleteMarkerSuffix); err != nil {
|
| 449 |
+
return fmt.Errorf("drop previously marked count net additions file: %w", err)
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
if err := os.RemoveAll(s.metadataPath() + DeleteMarkerSuffix); err != nil {
|
| 453 |
+
return fmt.Errorf("drop previously marked metadata file: %w", err)
|
| 454 |
+
}
|
| 455 |
+
|
| 456 |
+
// for the segment itself, we're not using RemoveAll, but Remove. If there
|
| 457 |
+
// was a NotExists error here, something would be seriously wrong, and we
|
| 458 |
+
// don't want to ignore it.
|
| 459 |
+
if err := os.Remove(s.path + DeleteMarkerSuffix); err != nil {
|
| 460 |
+
return fmt.Errorf("drop previously marked segment: %w", err)
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
return nil
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
const DeleteMarkerSuffix = ".deleteme"
|
| 467 |
+
|
| 468 |
+
func markDeleted(path string) error {
|
| 469 |
+
return os.Rename(path, path+DeleteMarkerSuffix)
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
func (s *segment) markForDeletion() error {
|
| 473 |
+
// support for persisting bloom filters and cnas was added in v1.17,
|
| 474 |
+
// therefore the files may not be present on segments created with previous
|
| 475 |
+
// versions. If we get a not exist error, we ignore it.
|
| 476 |
+
if err := markDeleted(s.bloomFilterPath()); err != nil {
|
| 477 |
+
if !os.IsNotExist(err) {
|
| 478 |
+
return fmt.Errorf("mark bloom filter deleted: %w", err)
|
| 479 |
+
}
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
for i := 0; i < int(s.secondaryIndexCount); i++ {
|
| 483 |
+
if err := markDeleted(s.bloomFilterSecondaryPath(i)); err != nil {
|
| 484 |
+
if !os.IsNotExist(err) {
|
| 485 |
+
return fmt.Errorf("mark secondary bloom filter deleted: %w", err)
|
| 486 |
+
}
|
| 487 |
+
}
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
if err := markDeleted(s.countNetPath()); err != nil {
|
| 491 |
+
if !os.IsNotExist(err) {
|
| 492 |
+
return fmt.Errorf("mark count net additions file deleted: %w", err)
|
| 493 |
+
}
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
if err := markDeleted(s.metadataPath()); err != nil {
|
| 497 |
+
if !os.IsNotExist(err) {
|
| 498 |
+
return fmt.Errorf("mark metadata file deleted: %w", err)
|
| 499 |
+
}
|
| 500 |
+
}
|
| 501 |
+
|
| 502 |
+
// for the segment itself, we're not accepting a NotExists error. If there
|
| 503 |
+
// was a NotExists error here, something would be seriously wrong, and we
|
| 504 |
+
// don't want to ignore it.
|
| 505 |
+
if err := markDeleted(s.path); err != nil {
|
| 506 |
+
return fmt.Errorf("mark segment deleted: %w", err)
|
| 507 |
+
}
|
| 508 |
+
|
| 509 |
+
return nil
|
| 510 |
+
}
|
| 511 |
+
|
| 512 |
+
// Size returns the total size of the segment in bytes, including the header
// and index
func (s *segment) Size() int {
	return int(s.size)
}

// getPath returns the segment file's on-disk path.
func (s *segment) getPath() string {
	return s.path
}

// setPath updates the stored on-disk path of the segment file.
func (s *segment) setPath(path string) {
	s.path = path
}

// getStrategy returns the segment's storage strategy.
func (s *segment) getStrategy() segmentindex.Strategy {
	return s.strategy
}

// getSecondaryIndexCount returns the number of secondary indexes in the segment.
func (s *segment) getSecondaryIndexCount() uint16 {
	return s.secondaryIndexCount
}

// getCountNetAdditions returns the net object count this segment adds with
// respect to all previous segments (meaningful only when calcCountNetAdditions
// is enabled).
func (s *segment) getCountNetAdditions() int {
	return s.countNetAdditions
}
|
| 537 |
+
|
| 538 |
+
// getLevel returns the segment's level as stored in its header.
func (s *segment) getLevel() uint16 {
	return s.level
}

// getSize returns the segment file's size in bytes.
func (s *segment) getSize() int64 {
	return s.size
}

// setSize overrides the stored file size.
func (s *segment) setSize(size int64) {
	s.size = size
}

// getInvertedData returns the inverted-strategy auxiliary data (tombstones etc.).
func (s *segment) getInvertedData() *segmentInvertedData {
	return s.invertedData
}

// getSegment returns the underlying *segment itself (Segment interface helper).
func (s *segment) getSegment() *segment {
	return s
}

// isLoaded always reports true: a plain segment is fully initialized on
// construction. Other Segment implementations may report otherwise.
func (s *segment) isLoaded() bool {
	return true
}

// PayloadSize is only the payload of the index, excluding the index
func (s *segment) PayloadSize() int {
	return int(s.dataEndPos)
}
|
| 566 |
+
|
| 567 |
+
type nodeReader struct {
|
| 568 |
+
r io.Reader
|
| 569 |
+
releaseFn func()
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
func (n *nodeReader) Read(b []byte) (int, error) {
|
| 573 |
+
if n.r == nil {
|
| 574 |
+
panic("nodeReader.Read called after Release")
|
| 575 |
+
}
|
| 576 |
+
return n.r.Read(b)
|
| 577 |
+
}
|
| 578 |
+
|
| 579 |
+
func (n *nodeReader) Release() {
|
| 580 |
+
n.r = nil
|
| 581 |
+
n.releaseFn()
|
| 582 |
+
}
|
| 583 |
+
|
| 584 |
+
// nodeOffset describes a byte range within a segment. An end of 0 is
// interpreted by newNodeReader as "until the end of the contents".
type nodeOffset struct {
	start, end uint64
}
|
| 587 |
+
|
| 588 |
+
// newNodeReader returns a reader over the byte range [offset.start,
// offset.end) of the segment. For in-memory segments this is a cheap
// bytes.Reader over the contents slice; otherwise it is a pooled, metered
// bufio.Reader over the content file, which must be returned via
// nodeReader.Release. An offset.end of 0 means "until the end of contents".
func (s *segment) newNodeReader(offset nodeOffset, operation string) (*nodeReader, error) {
	var (
		r       io.Reader
		err     error
		release = func() {} // no-op function for un-pooled readers
	)

	if s.readFromMemory {
		contents := s.contents[offset.start:]
		if offset.end != 0 {
			contents = s.contents[offset.start:offset.end]
		}
		r, err = s.bytesReaderFrom(contents)
	} else {
		r, release, err = s.bufferedReaderAt(offset.start, "ReadFromSegment"+operation)
	}
	if err != nil {
		return nil, fmt.Errorf("new nodeReader: %w", err)
	}
	return &nodeReader{r: r, releaseFn: release}, nil
}
|
| 609 |
+
|
| 610 |
+
// copyNode copies the byte range described by offset into b. The in-memory
// fast path is a plain copy; otherwise the read goes through a (pooled)
// nodeReader. b must be sized to the range for ReadFull to succeed.
func (s *segment) copyNode(b []byte, offset nodeOffset) error {
	if s.readFromMemory {
		copy(b, s.contents[offset.start:offset.end])
		return nil
	}
	n, err := s.newNodeReader(offset, "copyNode")
	if err != nil {
		return fmt.Errorf("copy node: %w", err)
	}
	defer n.Release()

	_, err = io.ReadFull(n, b)
	return err
}
|
| 624 |
+
|
| 625 |
+
func (s *segment) bytesReaderFrom(in []byte) (*bytes.Reader, error) {
|
| 626 |
+
if len(in) == 0 {
|
| 627 |
+
return nil, lsmkv.NotFound
|
| 628 |
+
}
|
| 629 |
+
return bytes.NewReader(in), nil
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
// bufferedReaderAt returns a pooled bufio.Reader positioned at the given
// offset of the segment's content file, plus a release function that MUST be
// called to hand the reader back to the pool. Reads are metered under the
// given operation name.
func (s *segment) bufferedReaderAt(offset uint64, operation string) (io.Reader, func(), error) {
	if s.contentFile == nil {
		return nil, nil, fmt.Errorf("nil contentFile for segment at %s", s.path)
	}

	meteredF := diskio.NewMeteredReader(s.contentFile, diskio.MeteredReaderCallback(readObserver.GetOrCreate(operation, s.metrics)))
	// NOTE(review): the section length is s.size rather than s.size-offset,
	// which over-states the bytes remaining past offset; presumably harmless
	// for bounded reads — confirm against callers.
	r := io.NewSectionReader(meteredF, int64(offset), s.size)

	bufioR := bufReaderPool.Get().(*bufio.Reader)
	bufioR.Reset(r)

	releaseFn := func() {
		bufReaderPool.Put(bufioR)
	}

	return bufioR, releaseFn, nil
}
|
| 649 |
+
|
| 650 |
+
var (
|
| 651 |
+
bufReaderPool *sync.Pool
|
| 652 |
+
readObserver *readObserverCache
|
| 653 |
+
)
|
| 654 |
+
|
| 655 |
+
func init() {
|
| 656 |
+
bufReaderPool = &sync.Pool{
|
| 657 |
+
New: func() interface{} {
|
| 658 |
+
return bufio.NewReader(nil)
|
| 659 |
+
},
|
| 660 |
+
}
|
| 661 |
+
|
| 662 |
+
readObserver = &readObserverCache{}
|
| 663 |
+
}
|
| 664 |
+
|
| 665 |
+
// readObserverCache memoizes BytesReadObserver instances per operation name
// so hot read paths don't re-create metric observers on every call.
type readObserverCache struct {
	sync.Map
}

// GetOrCreate returns a BytesReadObserver for the given key if it exists or
// creates one if it doesn't.
//
// Note that the design is not atomic, so it is possible that a single key will
// be initialized multiple times. This is not a problem, it only adds a slight
// re-allocation penalty, but does not alter the behavior.
func (c *readObserverCache) GetOrCreate(key string, metrics *Metrics) BytesReadObserver {
	if v, ok := c.Load(key); ok {
		return v.(BytesReadObserver)
	}

	observer := metrics.ReadObserver(key)
	c.Store(key, observer)
	return observer
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax.go
ADDED
|
@@ -0,0 +1,603 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
"io"
|
| 17 |
+
"math"
|
| 18 |
+
|
| 19 |
+
"github.com/weaviate/sroar"
|
| 20 |
+
"github.com/weaviate/weaviate/adapters/repos/db/helpers"
|
| 21 |
+
"github.com/weaviate/weaviate/adapters/repos/db/inverted/terms"
|
| 22 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 23 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc"
|
| 24 |
+
"github.com/weaviate/weaviate/entities/schema"
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
// blockMaxBufferSize is the size in bytes of the read-ahead scratch buffer
// used when loading encoded block data from disk. Kept as a var (not const),
// presumably so it can be tuned by tests or callers — confirm before relying
// on mutability.
var blockMaxBufferSize = 4096
|
| 28 |
+
|
| 29 |
+
func (s *segment) loadBlockEntries(node segmentindex.Node) ([]*terms.BlockEntry, uint64, *terms.BlockDataDecoded, error) {
|
| 30 |
+
var buf []byte
|
| 31 |
+
if s.readFromMemory {
|
| 32 |
+
buf = s.contents[node.Start : node.Start+uint64(8+12*terms.ENCODE_AS_FULL_BYTES)]
|
| 33 |
+
} else {
|
| 34 |
+
// read first 8 bytes to get
|
| 35 |
+
buf = make([]byte, 8+12*terms.ENCODE_AS_FULL_BYTES)
|
| 36 |
+
r, err := s.newNodeReader(nodeOffset{node.Start, node.Start + uint64(8+12*terms.ENCODE_AS_FULL_BYTES)}, "loadBMW")
|
| 37 |
+
if err != nil {
|
| 38 |
+
return nil, 0, nil, err
|
| 39 |
+
}
|
| 40 |
+
defer r.Release()
|
| 41 |
+
|
| 42 |
+
_, err = r.Read(buf)
|
| 43 |
+
if err != nil {
|
| 44 |
+
return nil, 0, nil, err
|
| 45 |
+
}
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
docCount := binary.LittleEndian.Uint64(buf)
|
| 49 |
+
|
| 50 |
+
if docCount <= uint64(terms.ENCODE_AS_FULL_BYTES) {
|
| 51 |
+
data := convertFixedLengthFromMemory(buf, int(docCount))
|
| 52 |
+
entries := make([]*terms.BlockEntry, 1)
|
| 53 |
+
propLength := s.invertedData.propertyLengths[data.DocIds[0]]
|
| 54 |
+
tf := data.Tfs[0]
|
| 55 |
+
entries[0] = &terms.BlockEntry{
|
| 56 |
+
Offset: 0,
|
| 57 |
+
MaxId: data.DocIds[len(data.DocIds)-1],
|
| 58 |
+
MaxImpactTf: uint32(tf),
|
| 59 |
+
MaxImpactPropLength: uint32(propLength),
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
return entries, docCount, data, nil
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
blockCount := (docCount + uint64(terms.BLOCK_SIZE-1)) / uint64(terms.BLOCK_SIZE)
|
| 66 |
+
|
| 67 |
+
entries := make([]*terms.BlockEntry, blockCount)
|
| 68 |
+
if s.readFromMemory {
|
| 69 |
+
buf = s.contents[node.Start+16 : node.Start+16+uint64(blockCount*20)]
|
| 70 |
+
} else {
|
| 71 |
+
r, err := s.newNodeReader(nodeOffset{node.Start + 16, node.Start + 16 + uint64(blockCount*20)}, "loadBMW")
|
| 72 |
+
if err != nil {
|
| 73 |
+
return nil, 0, nil, err
|
| 74 |
+
}
|
| 75 |
+
defer r.Release()
|
| 76 |
+
|
| 77 |
+
buf = make([]byte, blockCount*20)
|
| 78 |
+
_, err = r.Read(buf)
|
| 79 |
+
if err != nil {
|
| 80 |
+
return nil, 0, nil, err
|
| 81 |
+
}
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
for i := 0; i < int(blockCount); i++ {
|
| 85 |
+
entries[i] = terms.DecodeBlockEntry(buf[i*20 : (i+1)*20])
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
return entries, docCount, nil, nil
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
// todo: check if there is a performance impact of starting to sectionReader at offset and not have to pass offset here
|
| 92 |
+
func (s *segment) loadBlockDataReusable(sectionReader *io.SectionReader, blockDataBufferOffset, offset, offsetStart, offsetEnd uint64, buf []byte, encoded *terms.BlockData) (uint64, error) {
|
| 93 |
+
if s.readFromMemory {
|
| 94 |
+
terms.DecodeBlockDataReusable(s.contents[offsetStart:offsetEnd], encoded)
|
| 95 |
+
return offsetStart, nil
|
| 96 |
+
} else {
|
| 97 |
+
if offsetStart < blockDataBufferOffset || offsetEnd > blockDataBufferOffset+uint64(len(buf)) {
|
| 98 |
+
sectionReader.Seek(int64(offsetStart-offset), io.SeekStart)
|
| 99 |
+
_, err := sectionReader.Read(buf)
|
| 100 |
+
// EOF is expected when the last block + tree are smaller than the buffer
|
| 101 |
+
if err != nil && err.Error() != "EOF" {
|
| 102 |
+
return 0, err
|
| 103 |
+
}
|
| 104 |
+
// readBytes += int64(n)
|
| 105 |
+
// readCounts++
|
| 106 |
+
blockDataBufferOffset = offsetStart
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
bufOffsetStart := offsetStart - blockDataBufferOffset
|
| 110 |
+
bufOffsetEnd := offsetEnd - blockDataBufferOffset
|
| 111 |
+
terms.DecodeBlockDataReusable(buf[bufOffsetStart:bufOffsetEnd], encoded)
|
| 112 |
+
return blockDataBufferOffset, nil
|
| 113 |
+
}
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
// BlockMetrics collects per-term decode and scoring counters for block-max
// iteration. Counters are cumulative over the iterator's lifetime.
type BlockMetrics struct {
	BlockCountTotal         uint64 // total number of blocks for the term
	BlockCountDecodedDocIds uint64 // blocks whose doc ids were decoded
	BlockCountDecodedFreqs  uint64 // blocks whose term frequencies were decoded
	DocCountTotal           uint64 // total number of docs for the term
	DocCountDecodedDocIds   uint64 // docs whose ids were decoded
	DocCountDecodedFreqs    uint64 // docs whose term frequencies were decoded
	DocCountScored          uint64 // docs actually scored
	QueryCount              uint64 // not modified in this file — presumably maintained by callers; confirm
	LastAddedBlock          int    // last block index counted towards freq-decode metrics (-1 = none yet)
}
|
| 127 |
+
|
| 128 |
+
// SegmentBlockMax iterates the postings of one query term within one segment
// in block-max fashion: block metadata (max doc id, max impact) is available
// without decoding, doc ids are decoded per block on demand, and term
// frequencies are decoded lazily only when a doc is actually scored.
type SegmentBlockMax struct {
	segment               *segment          // nil in test mode (blockDatasTest is used instead)
	node                  segmentindex.Node // index node for the term key
	docCount              uint64            // total docs for the term in this segment
	blockEntries          []*terms.BlockEntry
	blockEntryIdx         int    // index of the current block
	blockDataBufferOffset uint64 // absolute offset blockDataBuffer currently starts at
	blockDataBuffer       []byte // read-ahead buffer for encoded block data (disk-backed segments)
	blockDataEncoded      *terms.BlockData
	blockDataDecoded      *terms.BlockDataDecoded
	blockDataIdx          int // index of the current doc within the decoded block
	blockDataSize         int // number of valid docs in the current block (last block may be partial)
	blockDataStartOffset  uint64
	blockDataEndOffset    uint64
	idPointer             uint64 // doc id currently pointed at (math.MaxUint64 once exhausted)
	idf                   float64
	exhausted             bool
	decoded               bool // doc ids of the current block are decoded
	freqDecoded           bool // term frequencies of the current block are decoded
	queryTermIndex        int
	Metrics               BlockMetrics
	averagePropLength     float64
	b                     float64 // BM25 b parameter
	k1                    float64 // BM25 k1 parameter
	propertyBoost         float64

	currentBlockImpact float32       // cached upper-bound score for the current block
	currentBlockMaxId  uint64        // cached max doc id of the current block
	tombstones         *sroar.Bitmap // block list (nil when empty or folded into filterDocIds)
	filterDocIds       *sroar.Bitmap // allow list (nil when absent)

	// at position 0 we have the doc ids decoder, at position 1 is the tfs decoder
	decoders []varenc.VarEncEncoder[uint64]

	propLengths    map[uint64]uint32  // doc id -> property length, for BM25 normalization
	blockDatasTest []*terms.BlockData // pre-built blocks used by tests instead of a segment

	sectionReader *io.SectionReader // reader over the term's byte range (disk-backed segments)
}
|
| 167 |
+
|
| 168 |
+
func generateSingleFilter(tombstones *sroar.Bitmap, filterDocIds helpers.AllowList) (*sroar.Bitmap, *sroar.Bitmap) {
|
| 169 |
+
if tombstones != nil && tombstones.IsEmpty() {
|
| 170 |
+
tombstones = nil
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
var filterSroar *sroar.Bitmap
|
| 174 |
+
// if we don't have an allow list filter, tombstones are the only needed filter
|
| 175 |
+
if filterDocIds != nil {
|
| 176 |
+
// the ok check should always succeed, but we keep it for safety
|
| 177 |
+
bm, ok := filterDocIds.(*helpers.BitmapAllowList)
|
| 178 |
+
// if we have a (allow list) filter and a (block list) tombstones filter, we can combine them into a single allowlist filter filter
|
| 179 |
+
if ok && tombstones != nil {
|
| 180 |
+
filterSroar = bm.Bm.AndNot(tombstones)
|
| 181 |
+
tombstones = nil
|
| 182 |
+
} else if ok && tombstones == nil {
|
| 183 |
+
filterSroar = bm.Bm
|
| 184 |
+
}
|
| 185 |
+
}
|
| 186 |
+
return tombstones, filterSroar
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
// NewSegmentBlockMax builds a block-max iterator for the query term `key`
// over segment s. It returns nil when the term is not present in the
// segment, when the combined filter is empty, or when initialization fails —
// callers treat nil as "nothing to iterate".
func NewSegmentBlockMax(s *segment, key []byte, queryTermIndex int, idf float64, propertyBoost float32, tombstones *sroar.Bitmap, filterDocIds helpers.AllowList, averagePropLength float64, config schema.BM25Config) *SegmentBlockMax {
	node, err := s.index.Get(key)
	if err != nil {
		// term not found (or lookup failed): nothing to iterate
		return nil
	}

	tombstones, filterSroar := generateSingleFilter(tombstones, filterDocIds)

	// if filter is empty after checking for tombstones,
	// we can skip it and return nil for the segment
	if filterSroar != nil && filterSroar.IsEmpty() {
		return nil
	}

	// one decoder per data field: [0] doc ids, [1] term frequencies
	codecs := s.invertedHeader.DataFields
	decoders := make([]varenc.VarEncEncoder[uint64], len(codecs))

	for i, codec := range codecs {
		decoders[i] = varenc.GetVarEncEncoder64(codec)
		decoders[i].Init(terms.BLOCK_SIZE)
	}

	var sectionReader *io.SectionReader

	if !s.readFromMemory {
		// NOTE(review): io.NewSectionReader's third argument is a byte count,
		// and node.End looks like an absolute end offset rather than a size
		// (node.End - node.Start). This only over-sizes the section — actual
		// reads stay within the node's range — but confirm the intent.
		sectionReader = io.NewSectionReader(s.contentFile, int64(node.Start), int64(node.End))
	}

	output := &SegmentBlockMax{
		segment:           s,
		node:              node,
		idf:               idf,
		queryTermIndex:    queryTermIndex,
		averagePropLength: averagePropLength,

		b:             config.B,
		k1:            config.K1,
		decoders:      decoders,
		propertyBoost: float64(propertyBoost),
		filterDocIds:  filterSroar,
		tombstones:    tombstones,
		sectionReader: sectionReader,
	}

	// reset loads block entries, decodes the first block and skips filtered docs
	err = output.reset()
	if err != nil {
		return nil
	}
	output.Metrics.BlockCountTotal += uint64(len(output.blockEntries))
	output.Metrics.DocCountTotal += output.docCount
	output.Metrics.LastAddedBlock = -1

	return output
}
|
| 243 |
+
|
| 244 |
+
// NewSegmentBlockMaxTest builds a SegmentBlockMax directly from pre-built
// block entries/data and property lengths, bypassing any segment or disk
// access. Intended for tests; mirrors NewSegmentBlockMax's filter handling
// and metrics setup. Returns nil when the combined filter is empty.
func NewSegmentBlockMaxTest(docCount uint64, blockEntries []*terms.BlockEntry, blockDatas []*terms.BlockData, propLengths map[uint64]uint32, key []byte, queryTermIndex int, idf float64, propertyBoost float32, tombstones *sroar.Bitmap, filterDocIds helpers.AllowList, averagePropLength float64, config schema.BM25Config, codecs []varenc.VarEncDataType) *SegmentBlockMax {
	decoders := make([]varenc.VarEncEncoder[uint64], len(codecs))

	for i, codec := range codecs {
		decoders[i] = varenc.GetVarEncEncoder64(codec)
	}

	tombstones, filterSroar := generateSingleFilter(tombstones, filterDocIds)

	// if filter is empty after checking for tombstones,
	// we can skip it and return nil for the segment
	if filterSroar != nil && filterSroar.IsEmpty() {
		return nil
	}

	output := &SegmentBlockMax{
		blockEntries:      blockEntries,
		node:              segmentindex.Node{Key: key},
		idf:               idf,
		queryTermIndex:    queryTermIndex,
		averagePropLength: averagePropLength,
		b:                 config.B,
		k1:                config.K1,
		decoders:          decoders,
		propertyBoost:     float64(propertyBoost),
		filterDocIds:      filterSroar,
		tombstones:        tombstones,
		propLengths:       propLengths,
		blockDatasTest:    blockDatas,
		blockEntryIdx:     0,
		blockDataIdx:      0,
		docCount:          docCount,
		blockDataDecoded: &terms.BlockDataDecoded{
			DocIds: make([]uint64, terms.BLOCK_SIZE),
			Tfs:    make([]uint64, terms.BLOCK_SIZE),
		},
	}

	// segment is nil, so decodeBlock reads from blockDatasTest
	output.decodeBlock()

	output.advanceOnTombstoneOrFilter()

	output.Metrics.BlockCountTotal += uint64(len(output.blockEntries))
	output.Metrics.DocCountTotal += output.docCount
	output.Metrics.LastAddedBlock = -1

	return output
}
|
| 292 |
+
|
| 293 |
+
// NewSegmentBlockMaxDecoded builds an iterator shell with no backing
// postings: it starts exhausted and marked fully decoded, carrying only the
// scoring parameters (BM25 config, boost, allow-list filter). Presumably the
// caller populates or drives it externally — confirm against call sites.
func NewSegmentBlockMaxDecoded(key []byte, queryTermIndex int, propertyBoost float32, filterDocIds helpers.AllowList, averagePropLength float64, config schema.BM25Config) *SegmentBlockMax {
	_, filterSroar := generateSingleFilter(nil, filterDocIds)

	output := &SegmentBlockMax{
		queryTermIndex:    queryTermIndex,
		node:              segmentindex.Node{Key: key},
		averagePropLength: averagePropLength,
		b:                 config.B,
		k1:                config.K1,
		propertyBoost:     float64(propertyBoost),
		filterDocIds:      filterSroar,
		blockEntryIdx:     0,
		blockDataIdx:      0,
		decoded:           true,
		freqDecoded:       true,
		exhausted:         true,
	}

	// blockEntries and docCount are zero here, so these adds are no-ops kept
	// for symmetry with the other constructors
	output.Metrics.BlockCountTotal += uint64(len(output.blockEntries))
	output.Metrics.DocCountTotal += output.docCount
	output.Metrics.LastAddedBlock = -1

	return output
}
|
| 317 |
+
|
| 318 |
+
// advanceOnTombstoneOrFilter moves the cursor forward until the current doc
// id passes both filters (contained in the allow list, if any, and absent
// from the tombstone block list, if any), crossing block boundaries as
// needed. It exhausts the iterator when the postings run out and finally
// syncs idPointer with the surviving position.
func (s *SegmentBlockMax) advanceOnTombstoneOrFilter() {
	if (s.filterDocIds == nil && s.tombstones == nil) || s.exhausted {
		// no filters to apply — just refresh the id pointer
		if !s.exhausted {
			s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx]
		}
		return
	}

	// step until the current doc passes both filters
	for (s.filterDocIds != nil && !s.filterDocIds.Contains(s.blockDataDecoded.DocIds[s.blockDataIdx])) ||
		(s.tombstones != nil && s.tombstones.Contains(s.blockDataDecoded.DocIds[s.blockDataIdx])) {
		s.blockDataIdx++
		if s.blockDataIdx > s.blockDataSize-1 {
			// current block consumed — move to the next block or exhaust
			if s.blockEntryIdx >= len(s.blockEntries)-1 {
				s.exhaust()
				return
			}
			s.blockEntryIdx++
			s.blockDataIdx = 0
			// NOTE(review): decodeBlock's error is discarded here; on a read
			// failure the loop would continue over stale data — confirm.
			s.decodeBlock()
		}
	}

	if !s.exhausted {
		s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx]
	}
}
|
| 344 |
+
|
| 345 |
+
func (s *SegmentBlockMax) reset() error {
|
| 346 |
+
var err error
|
| 347 |
+
|
| 348 |
+
s.propLengths, err = s.segment.GetPropertyLengths()
|
| 349 |
+
if err != nil {
|
| 350 |
+
return err
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
s.blockEntries, s.docCount, s.blockDataDecoded, err = s.segment.loadBlockEntries(s.node)
|
| 354 |
+
if err != nil {
|
| 355 |
+
return err
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
if s.blockDataDecoded == nil {
|
| 359 |
+
s.blockDataBuffer = make([]byte, blockMaxBufferSize)
|
| 360 |
+
s.blockDataDecoded = &terms.BlockDataDecoded{
|
| 361 |
+
DocIds: make([]uint64, terms.BLOCK_SIZE),
|
| 362 |
+
Tfs: make([]uint64, terms.BLOCK_SIZE),
|
| 363 |
+
}
|
| 364 |
+
s.blockDataEncoded = &terms.BlockData{}
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
s.blockEntryIdx = 0
|
| 368 |
+
s.blockDataIdx = 0
|
| 369 |
+
s.blockDataStartOffset = s.node.Start + 16 + uint64(len(s.blockEntries)*20)
|
| 370 |
+
s.blockDataEndOffset = s.node.End - uint64(len(s.node.Key)+4)
|
| 371 |
+
|
| 372 |
+
s.blockDataBufferOffset = s.blockDataStartOffset + 1
|
| 373 |
+
s.decodeBlock()
|
| 374 |
+
|
| 375 |
+
s.advanceOnTombstoneOrFilter()
|
| 376 |
+
|
| 377 |
+
return nil
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
// decodeBlock decodes the doc ids of the block at blockEntryIdx into the
// reusable decoded buffer and refreshes derived state (idPointer, block
// size, block-max impact and max id). Term frequencies are NOT decoded here;
// Score decodes them lazily. Marks the iterator exhausted when no blocks
// remain.
func (s *SegmentBlockMax) decodeBlock() error {
	if s.exhausted {
		return nil
	}

	var err error
	if s.blockEntries == nil {
		return nil
	}

	if s.blockEntryIdx >= len(s.blockEntries) {
		s.exhaust()
		return nil
	}

	s.blockDataIdx = 0
	if s.docCount <= uint64(terms.ENCODE_AS_FULL_BYTES) {
		// tiny posting list: data was already decoded inline by
		// loadBlockEntries, nothing to read here
		s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx]
		s.blockDataSize = int(s.docCount)
		s.freqDecoded = true
		s.decoded = true
		s.Metrics.BlockCountDecodedDocIds++
		s.Metrics.DocCountDecodedDocIds += uint64(s.blockDataSize)
		return nil
	}
	if s.segment != nil {
		startOffset := uint64(s.blockEntries[s.blockEntryIdx].Offset) + s.blockDataStartOffset
		endOffset := s.blockDataEndOffset

		// all but the last block end where the next block starts
		if s.blockEntryIdx < len(s.blockEntries)-1 {
			endOffset = uint64(s.blockEntries[s.blockEntryIdx+1].Offset) + s.blockDataStartOffset
		}
		s.blockDataBufferOffset, err = s.segment.loadBlockDataReusable(s.sectionReader, s.blockDataBufferOffset, s.node.Start, startOffset, endOffset, s.blockDataBuffer, s.blockDataEncoded)
		if err != nil {
			return err
		}
	} else {
		// test mode: blocks are provided pre-built instead of read from disk
		s.blockDataEncoded = s.blockDatasTest[s.blockEntryIdx]
	}

	// the last block may be only partially filled
	s.blockDataSize = terms.BLOCK_SIZE
	if s.blockEntryIdx == len(s.blockEntries)-1 {
		s.blockDataSize = int(s.docCount) - terms.BLOCK_SIZE*s.blockEntryIdx
	}
	s.decoders[0].DecodeReusable(s.blockDataEncoded.DocIds, s.blockDataDecoded.DocIds[:s.blockDataSize])
	s.Metrics.BlockCountDecodedDocIds++
	s.Metrics.DocCountDecodedDocIds += uint64(s.blockDataSize)
	s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx]
	s.freqDecoded = false
	s.decoded = true
	s.currentBlockImpact = s.computeCurrentBlockImpact()
	s.currentBlockMaxId = s.blockEntries[s.blockEntryIdx].MaxId
	return nil
}
|
| 434 |
+
|
| 435 |
+
// AdvanceAtLeast positions the iterator at the first surviving doc id >=
// docId: it first skips whole blocks via the block-max metadata, decodes the
// target block if necessary, scans within it, and finally applies
// tombstone/allow-list filtering.
func (s *SegmentBlockMax) AdvanceAtLeast(docId uint64) {
	if s.exhausted {
		return
	}

	// skip whole blocks whose max id is below the target
	for s.blockEntryIdx < len(s.blockEntries) && docId > s.blockEntries[s.blockEntryIdx].MaxId {
		s.blockEntryIdx++
		s.decoded = false
		s.freqDecoded = false
	}

	// past the last block, or the last block cannot contain docId: done
	if (s.blockEntryIdx == len(s.blockEntries)-1 && docId > s.blockEntries[s.blockEntryIdx].MaxId) || s.blockEntryIdx >= len(s.blockEntries) {
		s.exhaust()
		return
	}

	if !s.decoded {
		// NOTE(review): decodeBlock's error is discarded — confirm acceptable
		s.decodeBlock()
	}

	// scan within the decoded block
	for s.blockDataIdx < s.blockDataSize-1 && docId > s.blockDataDecoded.DocIds[s.blockDataIdx] {
		s.blockDataIdx++
	}

	s.advanceOnTombstoneOrFilter()
}
|
| 461 |
+
|
| 462 |
+
// AdvanceAtLeastShallow advances only the block cursor — no doc ids are
// decoded — until the current block may contain docId, refreshing the
// block-max impact and max id. idPointer is set to the previous block's max
// id, a lower bound for the true position, so callers must trigger a full
// decode before reading doc-level state.
func (s *SegmentBlockMax) AdvanceAtLeastShallow(docId uint64) {
	if s.exhausted {
		return
	}
	if docId <= s.blockEntries[s.blockEntryIdx].MaxId {
		// already positioned at the right block
		return
	}

	for s.blockEntryIdx < len(s.blockEntries) && docId > s.blockEntries[s.blockEntryIdx].MaxId {

		s.blockEntryIdx++
		s.blockDataIdx = 0
		s.decoded = false
		s.freqDecoded = false
		// guard before the loop condition re-indexes blockEntries
		if s.blockEntryIdx >= len(s.blockEntries) {
			s.exhaust()
			return
		}
	}

	if (s.blockEntryIdx == len(s.blockEntries)-1 && docId > s.blockEntries[s.blockEntryIdx].MaxId) || s.blockEntryIdx >= len(s.blockEntries) {
		s.exhaust()
		return
	}
	// lower bound: last id of the previous block
	s.idPointer = s.blockEntries[s.blockEntryIdx-1].MaxId
	s.currentBlockMaxId = s.blockEntries[s.blockEntryIdx].MaxId
	s.currentBlockImpact = s.computeCurrentBlockImpact()
}
|
| 490 |
+
|
| 491 |
+
// Idf returns the term's inverse document frequency (zeroed once exhausted).
func (s *SegmentBlockMax) Idf() float64 {
	return s.idf
}

// IdPointer returns the doc id the iterator currently points at
// (math.MaxUint64 once exhausted).
func (s *SegmentBlockMax) IdPointer() uint64 {
	return s.idPointer
}

// Exhausted reports whether the iterator has run out of postings.
func (s *SegmentBlockMax) Exhausted() bool {
	return s.exhausted
}

// Count returns the total number of documents for the term in this segment.
func (s *SegmentBlockMax) Count() int {
	return int(s.docCount)
}

// QueryTermIndex returns this term's position within the query.
func (s *SegmentBlockMax) QueryTermIndex() int {
	return s.queryTermIndex
}

// QueryTerm returns the term itself as a string.
func (s *SegmentBlockMax) QueryTerm() string {
	return string(s.node.Key)
}
|
| 514 |
+
|
| 515 |
+
// Score returns the current doc's id, its BM25 contribution for this term,
// and — when additionalExplanation is set — a pointer carrying frequency and
// property length for explain output. Term frequencies are decoded lazily on
// the first score within each block. Returns zeros once exhausted.
// Note: the averagePropLength parameter is not used; the value captured at
// construction time (s.averagePropLength) is applied instead.
func (s *SegmentBlockMax) Score(averagePropLength float64, additionalExplanation bool) (uint64, float64, *terms.DocPointerWithScore) {
	if s.exhausted {
		return 0, 0, nil
	}

	var doc *terms.DocPointerWithScore

	// lazily decode term frequencies for the current block
	if !s.freqDecoded {
		s.decoders[1].DecodeReusable(s.blockDataEncoded.Tfs, s.blockDataDecoded.Tfs[:s.blockDataSize])
		s.freqDecoded = true
	}

	freq := float64(s.blockDataDecoded.Tfs[s.blockDataIdx])
	propLength := s.propLengths[s.idPointer]
	// BM25 term-frequency saturation with document-length normalization
	tf := freq / (freq + s.k1*((1-s.b)+s.b*(float64(propLength)/s.averagePropLength)))
	s.Metrics.DocCountScored++
	// count freq-decode metrics at most once per block
	if s.blockEntryIdx != s.Metrics.LastAddedBlock {
		s.Metrics.BlockCountDecodedFreqs++
		s.Metrics.DocCountDecodedFreqs += uint64(s.blockDataSize)
		s.Metrics.LastAddedBlock = s.blockEntryIdx
	}

	if additionalExplanation {
		doc = &terms.DocPointerWithScore{
			Id:         s.idPointer,
			Frequency:  float32(freq),
			PropLength: float32(propLength),
		}
	}
	score := tf * s.idf * s.propertyBoost
	return s.idPointer, score, doc
}
|
| 547 |
+
|
| 548 |
+
// Advance moves the iterator to the next surviving doc, decoding the next
// block once the current one is used up.
func (s *SegmentBlockMax) Advance() {
	if s.exhausted {
		return
	}

	if !s.decoded {
		// NOTE(review): this decodes the current block and returns without
		// stepping blockDataIdx or applying filters — presumably the cursor
		// was already repositioned by a prior shallow advance; confirm.
		s.decodeBlock()
		return
	}

	s.blockDataIdx++
	if s.blockDataIdx >= s.blockDataSize {
		// current block consumed — move on (decodeBlock exhausts at the end)
		s.blockEntryIdx++
		s.blockDataIdx = 0
		s.decodeBlock()
		if s.exhausted {
			return
		}
	}

	s.advanceOnTombstoneOrFilter()
}
|
| 570 |
+
|
| 571 |
+
// computeCurrentBlockImpact returns the maximum possible BM25 contribution
// of any doc in the current block, derived from the block's stored
// max-impact term frequency and property length. This is the upper bound
// used for block-max pruning.
func (s *SegmentBlockMax) computeCurrentBlockImpact() float32 {
	if s.exhausted {
		return 0
	}
	// for the fully decode blocks return the idf
	if len(s.blockEntries) == 0 {
		return float32(s.idf)
	}
	freq := float64(s.blockEntries[s.blockEntryIdx].MaxImpactTf)
	propLength := float64(s.blockEntries[s.blockEntryIdx].MaxImpactPropLength)
	// same BM25 shape as Score, evaluated at the block's max-impact values
	return float32(s.idf * (freq / (freq + s.k1*(1-s.b+s.b*(propLength/s.averagePropLength)))) * s.propertyBoost)
}
|
| 583 |
+
|
| 584 |
+
// CurrentBlockImpact returns the cached upper-bound score for the current
// block (0 once exhausted).
func (s *SegmentBlockMax) CurrentBlockImpact() float32 {
	return s.currentBlockImpact
}

// CurrentBlockMaxId returns the highest doc id contained in the current
// block (math.MaxUint64 once exhausted).
func (s *SegmentBlockMax) CurrentBlockMaxId() uint64 {
	return s.currentBlockMaxId
}
|
| 591 |
+
|
| 592 |
+
// exhaust marks the iterator finished: the id pointers are pushed to
// MaxUint64 and impact/idf are zeroed so this term never wins another
// pruning comparison.
func (s *SegmentBlockMax) exhaust() {
	s.idPointer = math.MaxUint64
	s.currentBlockImpact = 0
	s.idf = 0
	s.currentBlockMaxId = math.MaxUint64
	s.exhausted = true
}

// SetIdf replaces the idf (e.g. after aggregating document frequencies
// across segments) and recomputes the current block's impact bound.
func (s *SegmentBlockMax) SetIdf(idf float64) {
	s.idf = idf
	s.currentBlockImpact = s.computeCurrentBlockImpact()
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax_test.go
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"fmt"
|
| 16 |
+
"testing"
|
| 17 |
+
|
| 18 |
+
"github.com/weaviate/weaviate/entities/schema"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
// TestSerializeAndParseInvertedNodeTest is a developer-only smoke test that
// walks a real on-disk segment with a SegmentBlockMax iterator. It depends
// on a hard-coded local path and is therefore permanently skipped; remove
// the t.Skip and point `path` at a local segment file to run it manually.
func TestSerializeAndParseInvertedNodeTest(t *testing.T) {
	t.Skip()
	path := "/Users/amourao/code/weaviate/weaviate/data-weaviate-0/" +
		"msmarco/6Jx2gaSLtsnd/lsm/property_text_searchable/segment-1729794337023372000.db"
	cfg := segmentConfig{
		mmapContents:             false,
		useBloomFilter:           false,
		calcCountNetAdditions:    false,
		overwriteDerived:         true,
		enableChecksumValidation: false,
	}
	seg, err := newSegment(path, nil, nil, nil, cfg)
	if err != nil {
		t.Fatalf("error creating segment: %v", err)
	}

	sbm := NewSegmentBlockMax(seg, []byte("and"), 0, 1, 1, nil, nil, 10, schema.BM25Config{K1: 1.2, B: 0.75})

	sbm.AdvanceAtLeast(100)
	id, score, pair := sbm.Score(1, false)
	sbm.Advance()
	fmt.Println(id, score, pair)
	sbm.AdvanceAtLeast(16000)
	sbm.AdvanceAtLeast(160000000)

	fmt.Println(sbm)
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters.go
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"bytes"
|
| 16 |
+
"encoding/binary"
|
| 17 |
+
"fmt"
|
| 18 |
+
"hash/crc32"
|
| 19 |
+
"io"
|
| 20 |
+
"os"
|
| 21 |
+
"path/filepath"
|
| 22 |
+
"strings"
|
| 23 |
+
"time"
|
| 24 |
+
|
| 25 |
+
"github.com/weaviate/weaviate/usecases/byteops"
|
| 26 |
+
|
| 27 |
+
"github.com/bits-and-blooms/bloom/v3"
|
| 28 |
+
"github.com/pkg/errors"
|
| 29 |
+
"github.com/weaviate/weaviate/entities/diskio"
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
func (s *segment) buildPath(template string) string {
|
| 33 |
+
isTmpFile := filepath.Ext(s.path) == ".tmp"
|
| 34 |
+
|
| 35 |
+
extless := strings.TrimSuffix(s.path, filepath.Ext(s.path))
|
| 36 |
+
if isTmpFile { // remove second extension
|
| 37 |
+
extless = strings.TrimSuffix(extless, filepath.Ext(extless))
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
path := fmt.Sprintf(template, extless)
|
| 41 |
+
if isTmpFile {
|
| 42 |
+
path = fmt.Sprintf("%s.tmp", path)
|
| 43 |
+
}
|
| 44 |
+
return path
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
func (s *segment) bloomFilterPath() string {
|
| 48 |
+
return s.buildPath("%s.bloom")
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
func (s *segment) bloomFilterSecondaryPath(pos int) string {
|
| 52 |
+
posTemplate := fmt.Sprintf(".%d.bloom", pos)
|
| 53 |
+
return s.buildPath("%s.secondary" + posTemplate)
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
func (s *segment) initBloomFilters(metrics *Metrics, overwrite bool, existingFilesList map[string]int64) error {
|
| 57 |
+
if err := s.initBloomFilter(overwrite, existingFilesList); err != nil {
|
| 58 |
+
return fmt.Errorf("init bloom filter for primary index: %w", err)
|
| 59 |
+
}
|
| 60 |
+
if s.secondaryIndexCount > 0 {
|
| 61 |
+
s.secondaryBloomFilters = make([]*bloom.BloomFilter, s.secondaryIndexCount)
|
| 62 |
+
for i := range s.secondaryBloomFilters {
|
| 63 |
+
if err := s.initSecondaryBloomFilter(i, overwrite, existingFilesList); err != nil {
|
| 64 |
+
return fmt.Errorf("init bloom filter for secondary index at %d: %w", i, err)
|
| 65 |
+
}
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
s.bloomFilterMetrics = newBloomFilterMetrics(metrics)
|
| 69 |
+
return nil
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
func (s *segment) initBloomFilter(overwrite bool, existingFilesList map[string]int64) error {
|
| 73 |
+
path := s.bloomFilterPath()
|
| 74 |
+
s.metaPaths = append(s.metaPaths, path)
|
| 75 |
+
|
| 76 |
+
loadFromDisk, err := fileExistsInList(existingFilesList, filepath.Base(path))
|
| 77 |
+
if err != nil {
|
| 78 |
+
return err
|
| 79 |
+
}
|
| 80 |
+
if loadFromDisk {
|
| 81 |
+
if overwrite {
|
| 82 |
+
err := os.Remove(path)
|
| 83 |
+
if err != nil {
|
| 84 |
+
return fmt.Errorf("delete existing bloom filter %s: %w", path, err)
|
| 85 |
+
}
|
| 86 |
+
} else {
|
| 87 |
+
err = s.loadBloomFilterFromDisk()
|
| 88 |
+
if err == nil {
|
| 89 |
+
return nil
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
if !errors.Is(err, ErrInvalidChecksum) {
|
| 93 |
+
// not a recoverable error
|
| 94 |
+
return err
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
// now continue re-calculating
|
| 98 |
+
}
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
before := time.Now()
|
| 102 |
+
|
| 103 |
+
if err := s.computeAndStoreBloomFilter(path); err != nil {
|
| 104 |
+
return err
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
took := time.Since(before)
|
| 108 |
+
|
| 109 |
+
s.logger.WithField("action", "lsm_init_disk_segment_build_bloom_filter_primary").
|
| 110 |
+
WithField("path", s.path).
|
| 111 |
+
WithField("took", took).
|
| 112 |
+
Debugf("building bloom filter took %s\n", took)
|
| 113 |
+
|
| 114 |
+
return nil
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
func (s *segment) computeAndStoreBloomFilter(path string) error {
|
| 118 |
+
keys, err := s.index.AllKeys()
|
| 119 |
+
if err != nil {
|
| 120 |
+
return err
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
s.bloomFilter = bloom.NewWithEstimates(uint(len(keys)), 0.001)
|
| 124 |
+
for _, key := range keys {
|
| 125 |
+
s.bloomFilter.Add(key)
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
if err := s.storeBloomFilterOnDisk(path); err != nil {
|
| 129 |
+
return fmt.Errorf("store bloom filter on disk: %w", err)
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
return nil
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
func (s *segment) storeBloomFilterOnDisk(path string) error {
|
| 136 |
+
bfSize := getBloomFilterSize(s.bloomFilter)
|
| 137 |
+
|
| 138 |
+
rw := byteops.NewReadWriter(make([]byte, bfSize+byteops.Uint32Len))
|
| 139 |
+
rw.MoveBufferPositionForward(byteops.Uint32Len) // leave space for checksum
|
| 140 |
+
_, err := s.bloomFilter.WriteTo(&rw)
|
| 141 |
+
if err != nil {
|
| 142 |
+
return fmt.Errorf("write bloom filter: %w", err)
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
return writeWithChecksum(rw, path, s.observeMetaWrite)
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
func (s *segment) loadBloomFilterFromDisk() error {
|
| 149 |
+
data, err := loadWithChecksum(s.bloomFilterPath(), -1, s.metrics.ReadObserver("loadBloomfilter"))
|
| 150 |
+
if err != nil {
|
| 151 |
+
return err
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
s.bloomFilter = new(bloom.BloomFilter)
|
| 155 |
+
_, err = s.bloomFilter.ReadFrom(bytes.NewReader(data))
|
| 156 |
+
if err != nil {
|
| 157 |
+
return fmt.Errorf("read bloom filter from disk: %w", err)
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
return nil
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
func (s *segment) initSecondaryBloomFilter(pos int, overwrite bool, existingFilesList map[string]int64) error {
|
| 164 |
+
before := time.Now()
|
| 165 |
+
|
| 166 |
+
path := s.bloomFilterSecondaryPath(pos)
|
| 167 |
+
s.metaPaths = append(s.metaPaths, path)
|
| 168 |
+
|
| 169 |
+
loadFromDisk, err := fileExistsInList(existingFilesList, filepath.Base(path))
|
| 170 |
+
if err != nil {
|
| 171 |
+
return err
|
| 172 |
+
}
|
| 173 |
+
if loadFromDisk {
|
| 174 |
+
if overwrite {
|
| 175 |
+
err := os.Remove(path)
|
| 176 |
+
if err != nil {
|
| 177 |
+
return fmt.Errorf("deleting existing secondary bloom filter %s: %w", path, err)
|
| 178 |
+
}
|
| 179 |
+
} else {
|
| 180 |
+
err = s.loadBloomFilterSecondaryFromDisk(pos)
|
| 181 |
+
if err == nil {
|
| 182 |
+
return nil
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
if !errors.Is(err, ErrInvalidChecksum) {
|
| 186 |
+
// not a recoverable error
|
| 187 |
+
return err
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
// now continue re-calculating
|
| 191 |
+
}
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
if err := s.computeAndStoreSecondaryBloomFilter(path, pos); err != nil {
|
| 195 |
+
return err
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
took := time.Since(before)
|
| 199 |
+
|
| 200 |
+
s.logger.WithField("action", "lsm_init_disk_segment_build_bloom_filter_secondary").
|
| 201 |
+
WithField("secondary_index_position", pos).
|
| 202 |
+
WithField("path", s.path).
|
| 203 |
+
WithField("took", took).
|
| 204 |
+
Debugf("building bloom filter took %s\n", took)
|
| 205 |
+
|
| 206 |
+
return nil
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
func (s *segment) computeAndStoreSecondaryBloomFilter(path string, pos int) error {
|
| 210 |
+
keys, err := s.secondaryIndices[pos].AllKeys()
|
| 211 |
+
if err != nil {
|
| 212 |
+
return err
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
s.secondaryBloomFilters[pos] = bloom.NewWithEstimates(uint(len(keys)), 0.001)
|
| 216 |
+
for _, key := range keys {
|
| 217 |
+
s.secondaryBloomFilters[pos].Add(key)
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
if err := s.storeBloomFilterSecondaryOnDisk(path, pos); err != nil {
|
| 221 |
+
return fmt.Errorf("store secondary bloom filter on disk: %w", err)
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
return nil
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
func (s *segment) storeBloomFilterSecondaryOnDisk(path string, pos int) error {
|
| 228 |
+
bfSize := getBloomFilterSize(s.bloomFilter)
|
| 229 |
+
|
| 230 |
+
rw := byteops.NewReadWriter(make([]byte, bfSize+byteops.Uint32Len))
|
| 231 |
+
rw.MoveBufferPositionForward(byteops.Uint32Len) // leave space for checksum
|
| 232 |
+
_, err := s.secondaryBloomFilters[pos].WriteTo(&rw)
|
| 233 |
+
if err != nil {
|
| 234 |
+
return fmt.Errorf("write bloom filter: %w", err)
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
return writeWithChecksum(rw, path, s.observeMetaWrite)
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
func (s *segment) loadBloomFilterSecondaryFromDisk(pos int) error {
|
| 241 |
+
data, err := loadWithChecksum(s.bloomFilterSecondaryPath(pos), -1, s.metrics.ReadObserver("loadSecondaryBloomFilter"))
|
| 242 |
+
if err != nil {
|
| 243 |
+
return err
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
s.secondaryBloomFilters[pos] = new(bloom.BloomFilter)
|
| 247 |
+
_, err = s.secondaryBloomFilters[pos].ReadFrom(bytes.NewReader(data))
|
| 248 |
+
if err != nil {
|
| 249 |
+
return fmt.Errorf("read bloom filter from disk: %w", err)
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
return nil
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
func fileExistsInList(nameList map[string]int64, filePath string) (bool, error) {
|
| 256 |
+
if nameList != nil {
|
| 257 |
+
_, ok := nameList[filePath]
|
| 258 |
+
return ok, nil
|
| 259 |
+
} else {
|
| 260 |
+
return fileExists(filePath)
|
| 261 |
+
}
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
// writeWithChecksum expects the data in the buffer to start at position byteops.Uint32Len so the
|
| 265 |
+
// checksum can be added into the same buffer at its start and everything can be written to the file
|
| 266 |
+
// in one go
|
| 267 |
+
func writeWithChecksum(bufWriter byteops.ReadWriter, path string, observeFileWriter diskio.MeteredWriterCallback) error {
|
| 268 |
+
// checksum needs to be at the start of the file
|
| 269 |
+
chksm := crc32.ChecksumIEEE(bufWriter.Buffer[byteops.Uint32Len:])
|
| 270 |
+
bufWriter.MoveBufferToAbsolutePosition(0)
|
| 271 |
+
bufWriter.WriteUint32(chksm)
|
| 272 |
+
f, err := os.Create(path)
|
| 273 |
+
if err != nil {
|
| 274 |
+
return fmt.Errorf("open file for writing: %w", err)
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
meteredW := diskio.NewMeteredWriter(f, observeFileWriter)
|
| 278 |
+
|
| 279 |
+
if _, err := meteredW.Write(bufWriter.Buffer); err != nil {
|
| 280 |
+
// ignoring f.Close() error here, as we don't care about whether the file
|
| 281 |
+
// was flushed, the call is mainly intended to prevent a file descriptor
|
| 282 |
+
// leak. We still want to return the original error below.
|
| 283 |
+
f.Close()
|
| 284 |
+
return fmt.Errorf("write bloom filter to disk: %w", err)
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
if err := f.Close(); err != nil {
|
| 288 |
+
return fmt.Errorf("close bloom filter file: %w", err)
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
return nil
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
// use negative length check to indicate that no length check should be
|
| 295 |
+
// performed
|
| 296 |
+
func loadWithChecksum(path string, lengthCheck int, observeFileReader BytesReadObserver) ([]byte, error) {
|
| 297 |
+
f, err := os.Open(path)
|
| 298 |
+
if err != nil {
|
| 299 |
+
return nil, err
|
| 300 |
+
}
|
| 301 |
+
defer f.Close()
|
| 302 |
+
meteredF := diskio.NewMeteredReader(f, diskio.MeteredReaderCallback(observeFileReader))
|
| 303 |
+
|
| 304 |
+
data, err := io.ReadAll(meteredF)
|
| 305 |
+
if err != nil {
|
| 306 |
+
return nil, err
|
| 307 |
+
}
|
| 308 |
+
if lengthCheck > 0 && len(data) != lengthCheck {
|
| 309 |
+
return nil, ErrInvalidChecksum
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
if len(data) < 4 {
|
| 313 |
+
// the file does not even contain the full checksum, we must consider it corrupt
|
| 314 |
+
return nil, ErrInvalidChecksum
|
| 315 |
+
}
|
| 316 |
+
|
| 317 |
+
chcksm := binary.LittleEndian.Uint32(data[:4])
|
| 318 |
+
actual := crc32.ChecksumIEEE(data[4:])
|
| 319 |
+
if chcksm != actual {
|
| 320 |
+
return nil, ErrInvalidChecksum
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
return data[4:], nil
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
func getBloomFilterSize(bf *bloom.BloomFilter) int {
|
| 327 |
+
// size of the bloom filter is size of the underlying bitSet and two uint64 parameters
|
| 328 |
+
bs := bf.BitSet()
|
| 329 |
+
bsSize := bs.BinaryStorageSize()
|
| 330 |
+
return bsSize + 2*byteops.Uint64Len
|
| 331 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters_test.go
ADDED
|
@@ -0,0 +1,642 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"context"
|
| 16 |
+
"encoding/binary"
|
| 17 |
+
"fmt"
|
| 18 |
+
"hash/crc32"
|
| 19 |
+
"io"
|
| 20 |
+
"math/rand"
|
| 21 |
+
"os"
|
| 22 |
+
"path"
|
| 23 |
+
"testing"
|
| 24 |
+
|
| 25 |
+
"github.com/sirupsen/logrus/hooks/test"
|
| 26 |
+
"github.com/stretchr/testify/assert"
|
| 27 |
+
"github.com/stretchr/testify/require"
|
| 28 |
+
"github.com/weaviate/weaviate/entities/cyclemanager"
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
func TestCreateBloomOnFlush(t *testing.T) {
|
| 32 |
+
ctx := context.Background()
|
| 33 |
+
dirName := t.TempDir()
|
| 34 |
+
|
| 35 |
+
logger, _ := test.NewNullLogger()
|
| 36 |
+
|
| 37 |
+
b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 38 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 39 |
+
WithStrategy(StrategyReplace), WithSecondaryIndices(1))
|
| 40 |
+
require.Nil(t, err)
|
| 41 |
+
|
| 42 |
+
require.Nil(t, b.Put([]byte("hello"), []byte("world"),
|
| 43 |
+
WithSecondaryKey(0, []byte("bonjour"))))
|
| 44 |
+
require.Nil(t, b.FlushMemtable())
|
| 45 |
+
|
| 46 |
+
files, err := os.ReadDir(dirName)
|
| 47 |
+
require.Nil(t, err)
|
| 48 |
+
|
| 49 |
+
_, ok := findFileWithExt(files, ".bloom")
|
| 50 |
+
assert.True(t, ok)
|
| 51 |
+
|
| 52 |
+
_, ok = findFileWithExt(files, "secondary.0.bloom")
|
| 53 |
+
assert.True(t, ok)
|
| 54 |
+
// on Windows we have to shutdown the bucket before opening it again
|
| 55 |
+
require.Nil(t, b.Shutdown(ctx))
|
| 56 |
+
|
| 57 |
+
b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 58 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 59 |
+
WithStrategy(StrategyReplace), WithSecondaryIndices(1))
|
| 60 |
+
require.Nil(t, err)
|
| 61 |
+
defer b2.Shutdown(ctx)
|
| 62 |
+
|
| 63 |
+
valuePrimary, err := b2.Get([]byte("hello"))
|
| 64 |
+
require.Nil(t, err)
|
| 65 |
+
valueSecondary, err := b2.GetBySecondary(0, []byte("bonjour"))
|
| 66 |
+
require.Nil(t, err)
|
| 67 |
+
|
| 68 |
+
assert.Equal(t, []byte("world"), valuePrimary)
|
| 69 |
+
assert.Equal(t, []byte("world"), valueSecondary)
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
// TestCreateBloomInit deletes the bloom filter files produced by a flush
// while the bucket is still open, shuts the bucket down, and verifies that
// re-initializing the bucket recreates both filter files.
func TestCreateBloomInit(t *testing.T) {
	// this test deletes the initial bloom and makes sure it gets recreated after
	// the bucket is initialized
	ctx := context.Background()
	dirName := t.TempDir()

	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		WithStrategy(StrategyReplace), WithSecondaryIndices(1))
	require.Nil(t, err)
	// defensive shutdown in case an assertion below fails before the
	// explicit Shutdown; a second Shutdown call is expected to be harmless
	defer b.Shutdown(ctx)

	require.Nil(t, b.Put([]byte("hello"), []byte("world"),
		WithSecondaryKey(0, []byte("bonjour"))))
	require.Nil(t, b.FlushMemtable())

	// remove the primary and the secondary filter file, re-reading the
	// directory each time to confirm the deletion actually took effect
	for _, ext := range []string{".secondary.0.bloom", ".bloom"} {
		files, err := os.ReadDir(dirName)
		require.Nil(t, err)
		fname, ok := findFileWithExt(files, ext)
		require.True(t, ok)

		err = os.RemoveAll(path.Join(dirName, fname))
		require.Nil(t, err)

		files, err = os.ReadDir(dirName)
		require.Nil(t, err)
		_, ok = findFileWithExt(files, ext)
		require.False(t, ok, "verify the file is really gone")
	}

	require.Nil(t, b.Shutdown(ctx))

	// now create a new bucket and assert that the file is re-created on init
	b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		WithStrategy(StrategyReplace))
	require.Nil(t, err)
	defer b2.Shutdown(ctx)

	// just to ensure segments are loaded
	cursor := b2.Cursor()
	cursor.Close()

	files, err := os.ReadDir(dirName)
	require.Nil(t, err)
	_, ok := findFileWithExt(files, ".bloom")
	require.True(t, ok)
	_, ok = findFileWithExt(files, ".secondary.0.bloom")
	require.True(t, ok)
}
|
| 125 |
+
|
| 126 |
+
// TestRepairCorruptedBloomOnInit overwrites bytes of the primary bloom
// filter file so its checksum no longer matches, then verifies that a
// newly opened bucket ignores the corrupt file, rebuilds it, and still
// serves reads correctly.
func TestRepairCorruptedBloomOnInit(t *testing.T) {
	ctx := context.Background()
	dirName := t.TempDir()

	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		WithStrategy(StrategyReplace))
	require.Nil(t, err)

	require.Nil(t, b.Put([]byte("hello"), []byte("world")))
	require.Nil(t, b.FlushMemtable())

	files, err := os.ReadDir(dirName)
	require.Nil(t, err)
	fname, ok := findFileWithExt(files, ".bloom")
	require.True(t, ok)

	// now corrupt the bloom filter by randomly overriding data
	require.Nil(t, corruptBloomFile(path.Join(dirName, fname)))
	// on Windows we have to shutdown the bucket before opening it again
	require.Nil(t, b.Shutdown(ctx))

	// now create a new bucket and assert that the file is ignored, re-created on
	// init, and the count matches
	b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		WithStrategy(StrategyReplace))
	require.Nil(t, err)
	defer b2.Shutdown(ctx)

	value, err := b2.Get([]byte("hello"))
	assert.Nil(t, err)
	assert.Equal(t, []byte("world"), value)
}
|
| 162 |
+
|
| 163 |
+
func TestRepairTooShortBloomOnInit(t *testing.T) {
|
| 164 |
+
ctx := context.Background()
|
| 165 |
+
dirName := t.TempDir()
|
| 166 |
+
|
| 167 |
+
logger, _ := test.NewNullLogger()
|
| 168 |
+
|
| 169 |
+
b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 170 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 171 |
+
WithStrategy(StrategyReplace))
|
| 172 |
+
require.Nil(t, err)
|
| 173 |
+
|
| 174 |
+
require.Nil(t, b.Put([]byte("hello"), []byte("world")))
|
| 175 |
+
require.Nil(t, b.FlushMemtable())
|
| 176 |
+
|
| 177 |
+
files, err := os.ReadDir(dirName)
|
| 178 |
+
require.Nil(t, err)
|
| 179 |
+
fname, ok := findFileWithExt(files, ".bloom")
|
| 180 |
+
require.True(t, ok)
|
| 181 |
+
b.Shutdown(ctx)
|
| 182 |
+
|
| 183 |
+
// now corrupt the bloom filter by randomly overriding data
|
| 184 |
+
require.Nil(t, corruptBloomFileByTruncatingIt(path.Join(dirName, fname)))
|
| 185 |
+
|
| 186 |
+
// now create a new bucket and assert that the file is ignored, re-created on
|
| 187 |
+
// init, and the count matches
|
| 188 |
+
b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 189 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 190 |
+
WithStrategy(StrategyReplace))
|
| 191 |
+
require.Nil(t, err)
|
| 192 |
+
defer b2.Shutdown(ctx)
|
| 193 |
+
|
| 194 |
+
value, err := b2.Get([]byte("hello"))
|
| 195 |
+
assert.Nil(t, err)
|
| 196 |
+
assert.Equal(t, []byte("world"), value)
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
// TestRepairCorruptedBloomSecondaryOnInit corrupts the secondary index's
// bloom filter file and verifies that a re-opened bucket rebuilds it and
// serves secondary-key reads, including after a delete of the primary key.
// NOTE(review): despite the name, this test exercises
// GetBySecondaryIntoMemory while its "...IntoMemory" sibling uses
// GetBySecondary — the two names look swapped; confirm before renaming.
func TestRepairCorruptedBloomSecondaryOnInit(t *testing.T) {
	ctx := context.Background()
	dirName := t.TempDir()

	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		WithStrategy(StrategyReplace), WithSecondaryIndices(1))
	require.Nil(t, err)

	require.Nil(t, b.Put([]byte("hello"), []byte("world"),
		WithSecondaryKey(0, []byte("bonjour"))))
	require.Nil(t, b.FlushMemtable())

	files, err := os.ReadDir(dirName)
	require.Nil(t, err)
	fname, ok := findFileWithExt(files, "secondary.0.bloom")
	require.True(t, ok)

	// now corrupt the file by replacing the count value without adapting the checksum
	require.Nil(t, corruptBloomFile(path.Join(dirName, fname)))
	// on Windows we have to shutdown the bucket before opening it again
	require.Nil(t, b.Shutdown(ctx))

	// now create a new bucket and assert that the file is ignored, re-created on
	// init, and the count matches
	b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		WithStrategy(StrategyReplace), WithSecondaryIndices(1))
	require.Nil(t, err)
	defer b2.Shutdown(ctx)

	value := make([]byte, 5)
	value, _, err = b2.GetBySecondaryIntoMemory(0, []byte("bonjour"), value)
	assert.Nil(t, err)
	assert.Equal(t, []byte("world"), value)

	err = b2.Delete([]byte("hello"))
	assert.Nil(t, err)

	// after deleting the primary key, both lookup paths must miss
	v, err := b2.Get([]byte("hello"))
	assert.Nil(t, err)
	assert.Nil(t, v)

	value, _, err = b2.GetBySecondaryIntoMemory(0, []byte("bonjour"), value)
	assert.Nil(t, err)
	assert.Nil(t, value)
}
|
| 248 |
+
|
| 249 |
+
func TestRepairCorruptedBloomSecondaryOnInitIntoMemory(t *testing.T) {
|
| 250 |
+
ctx := context.Background()
|
| 251 |
+
dirName := t.TempDir()
|
| 252 |
+
|
| 253 |
+
logger, _ := test.NewNullLogger()
|
| 254 |
+
|
| 255 |
+
b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 256 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 257 |
+
WithStrategy(StrategyReplace), WithSecondaryIndices(1))
|
| 258 |
+
require.Nil(t, err)
|
| 259 |
+
|
| 260 |
+
require.Nil(t, b.Put([]byte("hello"), []byte("world"),
|
| 261 |
+
WithSecondaryKey(0, []byte("bonjour"))))
|
| 262 |
+
require.Nil(t, b.FlushMemtable())
|
| 263 |
+
|
| 264 |
+
files, err := os.ReadDir(dirName)
|
| 265 |
+
require.Nil(t, err)
|
| 266 |
+
fname, ok := findFileWithExt(files, "secondary.0.bloom")
|
| 267 |
+
require.True(t, ok)
|
| 268 |
+
|
| 269 |
+
b.Shutdown(ctx)
|
| 270 |
+
|
| 271 |
+
// now corrupt the file by replacing the count value without adapting the checksum
|
| 272 |
+
require.Nil(t, corruptBloomFile(path.Join(dirName, fname)))
|
| 273 |
+
|
| 274 |
+
// now create a new bucket and assert that the file is ignored, re-created on
|
| 275 |
+
// init, and the count matches
|
| 276 |
+
b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 277 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 278 |
+
WithStrategy(StrategyReplace), WithSecondaryIndices(1))
|
| 279 |
+
require.Nil(t, err)
|
| 280 |
+
defer b2.Shutdown(ctx)
|
| 281 |
+
|
| 282 |
+
value, err := b2.GetBySecondary(0, []byte("bonjour"))
|
| 283 |
+
assert.Nil(t, err)
|
| 284 |
+
assert.Equal(t, []byte("world"), value)
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
func TestRepairTooShortBloomSecondaryOnInit(t *testing.T) {
|
| 288 |
+
ctx := context.Background()
|
| 289 |
+
dirName := t.TempDir()
|
| 290 |
+
|
| 291 |
+
logger, _ := test.NewNullLogger()
|
| 292 |
+
|
| 293 |
+
b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 294 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 295 |
+
WithStrategy(StrategyReplace), WithSecondaryIndices(1))
|
| 296 |
+
require.Nil(t, err)
|
| 297 |
+
|
| 298 |
+
require.Nil(t, b.Put([]byte("hello"), []byte("world"),
|
| 299 |
+
WithSecondaryKey(0, []byte("bonjour"))))
|
| 300 |
+
require.Nil(t, b.FlushMemtable())
|
| 301 |
+
|
| 302 |
+
files, err := os.ReadDir(dirName)
|
| 303 |
+
require.Nil(t, err)
|
| 304 |
+
fname, ok := findFileWithExt(files, "secondary.0.bloom")
|
| 305 |
+
require.True(t, ok)
|
| 306 |
+
|
| 307 |
+
b.Shutdown(ctx)
|
| 308 |
+
// now corrupt the file by replacing the count value without adapting the checksum
|
| 309 |
+
require.Nil(t, corruptBloomFileByTruncatingIt(path.Join(dirName, fname)))
|
| 310 |
+
|
| 311 |
+
// now create a new bucket and assert that the file is ignored, re-created on
|
| 312 |
+
// init, and the count matches
|
| 313 |
+
b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 314 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 315 |
+
WithStrategy(StrategyReplace), WithSecondaryIndices(1))
|
| 316 |
+
require.Nil(t, err)
|
| 317 |
+
defer b2.Shutdown(ctx)
|
| 318 |
+
|
| 319 |
+
value, err := b2.GetBySecondary(0, []byte("bonjour"))
|
| 320 |
+
assert.Nil(t, err)
|
| 321 |
+
assert.Equal(t, []byte("world"), value)
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
func TestLoadWithChecksumErrorCases(t *testing.T) {
|
| 325 |
+
t.Run("file does not exist", func(t *testing.T) {
|
| 326 |
+
dirName := t.TempDir()
|
| 327 |
+
_, err := loadWithChecksum(path.Join(dirName, "my-file"), -1, nil)
|
| 328 |
+
assert.NotNil(t, err)
|
| 329 |
+
})
|
| 330 |
+
|
| 331 |
+
t.Run("file has incorrect length", func(t *testing.T) {
|
| 332 |
+
dirName := t.TempDir()
|
| 333 |
+
fName := path.Join(dirName, "my-file")
|
| 334 |
+
f, err := os.Create(fName)
|
| 335 |
+
require.Nil(t, err)
|
| 336 |
+
|
| 337 |
+
_, err = f.Write(make([]byte, 13))
|
| 338 |
+
require.Nil(t, err)
|
| 339 |
+
|
| 340 |
+
require.Nil(t, f.Close())
|
| 341 |
+
|
| 342 |
+
_, err = loadWithChecksum(path.Join(dirName, "my-file"), 17, nil)
|
| 343 |
+
assert.NotNil(t, err)
|
| 344 |
+
})
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
func BenchmarkLoading(b *testing.B) {
|
| 348 |
+
for _, val := range []int{10, 100, 1000, 10000} {
|
| 349 |
+
b.Run(fmt.Sprintf("%d", val), func(b *testing.B) {
|
| 350 |
+
dirName := b.TempDir()
|
| 351 |
+
fName := path.Join(dirName, fmt.Sprintf("my-file-%d", val))
|
| 352 |
+
f, err := os.Create(fName)
|
| 353 |
+
require.Nil(b, err)
|
| 354 |
+
data := make([]byte, val)
|
| 355 |
+
for i := 0; i < len(data); i++ {
|
| 356 |
+
data[i] = byte(rand.Intn(100))
|
| 357 |
+
}
|
| 358 |
+
chmsum := crc32.ChecksumIEEE(data[4:])
|
| 359 |
+
binary.LittleEndian.PutUint32(data[:4], chmsum)
|
| 360 |
+
_, err = f.Write(data)
|
| 361 |
+
require.NoError(b, err)
|
| 362 |
+
|
| 363 |
+
require.NoError(b, f.Sync())
|
| 364 |
+
require.NoError(b, f.Close())
|
| 365 |
+
b.ResetTimer()
|
| 366 |
+
for i := 0; i < b.N; i++ {
|
| 367 |
+
loadedData, err := loadWithChecksum(fName, len(data), nil)
|
| 368 |
+
require.NoError(b, err)
|
| 369 |
+
require.Equal(b, loadedData, data[4:])
|
| 370 |
+
}
|
| 371 |
+
})
|
| 372 |
+
}
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
func TestBloom_OFF(t *testing.T) {
|
| 376 |
+
ctx := context.Background()
|
| 377 |
+
tests := bucketTests{
|
| 378 |
+
{
|
| 379 |
+
name: "dontCreateBloom",
|
| 380 |
+
f: dontCreateBloom,
|
| 381 |
+
opts: []BucketOption{
|
| 382 |
+
WithStrategy(StrategyReplace),
|
| 383 |
+
WithSecondaryIndices(1),
|
| 384 |
+
WithUseBloomFilter(false),
|
| 385 |
+
},
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
name: "dontRecreateBloom",
|
| 389 |
+
f: dontRecreateBloom,
|
| 390 |
+
opts: []BucketOption{
|
| 391 |
+
WithStrategy(StrategyReplace),
|
| 392 |
+
WithSecondaryIndices(1),
|
| 393 |
+
WithUseBloomFilter(false),
|
| 394 |
+
},
|
| 395 |
+
},
|
| 396 |
+
{
|
| 397 |
+
name: "dontPrecomputeBloom",
|
| 398 |
+
f: dontPrecomputeBloom,
|
| 399 |
+
opts: []BucketOption{
|
| 400 |
+
WithStrategy(StrategyReplace),
|
| 401 |
+
WithSecondaryIndices(1),
|
| 402 |
+
WithUseBloomFilter(false),
|
| 403 |
+
},
|
| 404 |
+
},
|
| 405 |
+
}
|
| 406 |
+
tests.run(ctx, t)
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
// dontCreateBloom flushes a fresh bucket and asserts that no bloom-filter
// files (neither primary nor secondary) are written to disk, while lookups by
// primary and secondary key still succeed without them.
func dontCreateBloom(ctx context.Context, t *testing.T, opts []BucketOption) {
	dirName := t.TempDir()
	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		opts...)
	require.NoError(t, err)
	defer b.Shutdown(ctx)

	t.Run("populate", func(t *testing.T) {
		// one object with a secondary key, then flush so a segment exists on disk
		require.NoError(t, b.Put([]byte("hello"), []byte("world"),
			WithSecondaryKey(0, []byte("bonjour"))))
		require.NoError(t, b.FlushMemtable())
	})

	t.Run("check files", func(t *testing.T) {
		files, err := os.ReadDir(dirName)
		require.NoError(t, err)

		// neither the primary nor the secondary bloom file must exist
		_, ok := findFileWithExt(files, ".bloom")
		assert.False(t, ok)
		_, ok = findFileWithExt(files, "secondary.0.bloom")
		assert.False(t, ok)
	})

	t.Run("search", func(t *testing.T) {
		// reads must work via both key types even without bloom filters
		valuePrimary, err := b.Get([]byte("hello"))
		require.NoError(t, err)
		valueSecondary, err := b.GetBySecondary(0, []byte("bonjour"))
		require.NoError(t, err)

		assert.Equal(t, []byte("world"), valuePrimary)
		assert.Equal(t, []byte("world"), valueSecondary)
	})
}
|
| 445 |
+
|
| 446 |
+
// dontRecreateBloom populates a bucket, shuts it down, then reopens the same
// directory and asserts that reopening does not (re)create bloom-filter files
// and that lookups still work on the reopened bucket.
func dontRecreateBloom(ctx context.Context, t *testing.T, opts []BucketOption) {
	dirName := t.TempDir()
	logger, _ := test.NewNullLogger()

	t.Run("create, populate, shutdown", func(t *testing.T) {
		b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
			cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
			opts...)
		require.NoError(t, err)
		defer b.Shutdown(ctx)

		require.NoError(t, b.Put([]byte("hello"), []byte("world"),
			WithSecondaryKey(0, []byte("bonjour"))))
		require.NoError(t, b.FlushMemtable())
	})

	// reopen the same directory; segment init must not regenerate bloom files
	b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		opts...)
	require.NoError(t, err)
	defer b2.Shutdown(ctx)

	t.Run("check files", func(t *testing.T) {
		files, err := os.ReadDir(dirName)
		require.NoError(t, err)

		_, ok := findFileWithExt(files, ".bloom")
		assert.False(t, ok)
		_, ok = findFileWithExt(files, "secondary.0.bloom")
		assert.False(t, ok)
	})

	t.Run("search", func(t *testing.T) {
		valuePrimary, err := b2.Get([]byte("hello"))
		require.NoError(t, err)
		valueSecondary, err := b2.GetBySecondary(0, []byte("bonjour"))
		require.NoError(t, err)

		assert.Equal(t, []byte("world"), valuePrimary)
		assert.Equal(t, []byte("world"), valueSecondary)
	})
}
|
| 488 |
+
|
| 489 |
+
// dontPrecomputeBloom creates two segments and forces a compaction, then
// asserts that the compaction output also carries no bloom-filter files and
// that all four values (primary + secondary, from both original segments)
// remain readable from the compacted segment.
func dontPrecomputeBloom(ctx context.Context, t *testing.T, opts []BucketOption) {
	dirName := t.TempDir()
	logger, _ := test.NewNullLogger()

	b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
		opts...)
	require.NoError(t, err)
	defer b.Shutdown(ctx)

	t.Run("populate, compact", func(t *testing.T) {
		// two flushes produce two segments, the precondition for a compaction
		require.NoError(t, b.Put([]byte("hello"), []byte("world"),
			WithSecondaryKey(0, []byte("bonjour"))))
		require.NoError(t, b.FlushMemtable())

		require.NoError(t, b.Put([]byte("hello2"), []byte("world2"),
			WithSecondaryKey(0, []byte("bonjour2"))))
		require.NoError(t, b.FlushMemtable())

		compacted, err := b.disk.compactOnce()
		require.NoError(t, err)
		require.True(t, compacted)
	})

	t.Run("check files", func(t *testing.T) {
		files, err := os.ReadDir(dirName)
		require.NoError(t, err)

		// the compacted segment must not have precomputed bloom files either
		_, ok := findFileWithExt(files, ".bloom")
		assert.False(t, ok)
		_, ok = findFileWithExt(files, "secondary.0.bloom")
		assert.False(t, ok)
	})

	t.Run("search", func(t *testing.T) {
		valuePrimary, err := b.Get([]byte("hello"))
		require.NoError(t, err)
		valueSecondary, err := b.GetBySecondary(0, []byte("bonjour"))
		require.NoError(t, err)
		value2Primary, err := b.Get([]byte("hello2"))
		require.NoError(t, err)
		value2Secondary, err := b.GetBySecondary(0, []byte("bonjour2"))
		require.NoError(t, err)

		assert.Equal(t, []byte("world"), valuePrimary)
		assert.Equal(t, []byte("world"), valueSecondary)
		assert.Equal(t, []byte("world2"), value2Primary)
		assert.Equal(t, []byte("world2"), value2Secondary)
	})
}
|
| 539 |
+
|
| 540 |
+
func corruptBloomFile(fname string) error {
|
| 541 |
+
f, err := os.Open(fname)
|
| 542 |
+
if err != nil {
|
| 543 |
+
return err
|
| 544 |
+
}
|
| 545 |
+
|
| 546 |
+
data, err := io.ReadAll(f)
|
| 547 |
+
if err != nil {
|
| 548 |
+
return err
|
| 549 |
+
}
|
| 550 |
+
|
| 551 |
+
if err := f.Close(); err != nil {
|
| 552 |
+
return err
|
| 553 |
+
}
|
| 554 |
+
|
| 555 |
+
// corrupt it by setting all data bytes to 0x01
|
| 556 |
+
for i := 5; i < len(data); i++ {
|
| 557 |
+
data[i] = 0x01
|
| 558 |
+
}
|
| 559 |
+
|
| 560 |
+
f, err = os.Create(fname)
|
| 561 |
+
if err != nil {
|
| 562 |
+
return err
|
| 563 |
+
}
|
| 564 |
+
|
| 565 |
+
_, err = f.Write(data)
|
| 566 |
+
if err != nil {
|
| 567 |
+
return err
|
| 568 |
+
}
|
| 569 |
+
|
| 570 |
+
return f.Close()
|
| 571 |
+
}
|
| 572 |
+
|
| 573 |
+
func corruptBloomFileByTruncatingIt(fname string) error {
|
| 574 |
+
f, err := os.Open(fname)
|
| 575 |
+
if err != nil {
|
| 576 |
+
return err
|
| 577 |
+
}
|
| 578 |
+
|
| 579 |
+
data, err := io.ReadAll(f)
|
| 580 |
+
if err != nil {
|
| 581 |
+
return err
|
| 582 |
+
}
|
| 583 |
+
|
| 584 |
+
if err := f.Close(); err != nil {
|
| 585 |
+
return err
|
| 586 |
+
}
|
| 587 |
+
|
| 588 |
+
data = data[:2]
|
| 589 |
+
|
| 590 |
+
f, err = os.Create(fname)
|
| 591 |
+
if err != nil {
|
| 592 |
+
return err
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
_, err = f.Write(data)
|
| 596 |
+
if err != nil {
|
| 597 |
+
return err
|
| 598 |
+
}
|
| 599 |
+
|
| 600 |
+
return f.Close()
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
func BenchmarkName(b *testing.B) {
|
| 604 |
+
logger, _ := test.NewNullLogger()
|
| 605 |
+
fn := func(key []byte) (bool, error) { return true, nil }
|
| 606 |
+
|
| 607 |
+
for _, val := range []int{10, 100, 1000, 10000} {
|
| 608 |
+
b.Run(fmt.Sprintf("%d", val), func(b *testing.B) {
|
| 609 |
+
dirName := b.TempDir()
|
| 610 |
+
ctx := context.Background()
|
| 611 |
+
bu, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil,
|
| 612 |
+
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(),
|
| 613 |
+
WithStrategy(StrategyReplace))
|
| 614 |
+
require.Nil(b, err)
|
| 615 |
+
|
| 616 |
+
for i := 0; i < val; i++ {
|
| 617 |
+
require.Nil(b, bu.Put([]byte(fmt.Sprintf("hello-%v", i)), []byte(fmt.Sprintf("world-%v", i))))
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
require.Nil(b, bu.FlushMemtable())
|
| 621 |
+
bu.Shutdown(ctx)
|
| 622 |
+
|
| 623 |
+
files, err := os.ReadDir(dirName)
|
| 624 |
+
require.NoError(b, err)
|
| 625 |
+
|
| 626 |
+
fnames, ok := findFileWithExt(files, ".db")
|
| 627 |
+
assert.True(b, ok)
|
| 628 |
+
assert.NotNil(b, fnames)
|
| 629 |
+
|
| 630 |
+
b.ResetTimer()
|
| 631 |
+
b.ReportAllocs()
|
| 632 |
+
for i := 0; i < b.N; i++ {
|
| 633 |
+
_, err := newSegment(path.Join(dirName, fnames), logger, nil, fn, segmentConfig{
|
| 634 |
+
mmapContents: false,
|
| 635 |
+
useBloomFilter: true,
|
| 636 |
+
overwriteDerived: true,
|
| 637 |
+
})
|
| 638 |
+
require.NoError(b, err)
|
| 639 |
+
}
|
| 640 |
+
})
|
| 641 |
+
}
|
| 642 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_cleaner_replace.go
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"bufio"
|
| 16 |
+
"errors"
|
| 17 |
+
"fmt"
|
| 18 |
+
"io"
|
| 19 |
+
"math"
|
| 20 |
+
|
| 21 |
+
"github.com/prometheus/client_golang/prometheus"
|
| 22 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 23 |
+
"github.com/weaviate/weaviate/entities/cyclemanager"
|
| 24 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 25 |
+
"github.com/weaviate/weaviate/usecases/monitoring"
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
// segmentCleanerReplace rewrites a "replace"-strategy segment into w, keeping
// only entries whose keys are NOT shadowed by newer (upper) segments. The
// destination must be seekable because the header is rewritten at the end,
// once the final index offset is known.
type segmentCleanerReplace struct {
	w    io.WriteSeeker // destination segment file
	bufw *bufio.Writer  // buffered wrapper around w for the sequential writes
	// cursor iterates over every node of the segment being cleaned
	cursor *segmentCursorReplace
	// keyExistsFn reports whether a key has a newer version in an upper segment
	keyExistsFn              keyExistsOnUpperSegmentsFunc
	version                  uint16 // header version, derived from checksum setting
	level                    uint16 // compaction level carried over into the new header
	secondaryIndexCount      uint16
	scratchSpacePath         string
	enableChecksumValidation bool
}
|
| 39 |
+
|
| 40 |
+
// newSegmentCleanerReplace constructs a cleaner that copies the contents of
// cursor into w, skipping keys for which keyExistsFn reports a newer version.
// The header version is chosen based on whether checksum validation is on.
func newSegmentCleanerReplace(w io.WriteSeeker, cursor *segmentCursorReplace,
	keyExistsFn keyExistsOnUpperSegmentsFunc, level, secondaryIndexCount uint16,
	scratchSpacePath string, enableChecksumValidation bool,
) *segmentCleanerReplace {
	return &segmentCleanerReplace{
		w: w,
		// 256 KiB buffer: cleanup writes are long and sequential
		bufw:                     bufio.NewWriterSize(w, 256*1024),
		cursor:                   cursor,
		keyExistsFn:              keyExistsFn,
		version:                  segmentindex.ChooseHeaderVersion(enableChecksumValidation),
		level:                    level,
		secondaryIndexCount:      secondaryIndexCount,
		scratchSpacePath:         scratchSpacePath,
		enableChecksumValidation: enableChecksumValidation,
	}
}
|
| 56 |
+
|
| 57 |
+
// do runs the full cleanup pass in order: reserve a dummy header, stream the
// surviving key/value pairs, append the rebuilt indexes, flush, rewrite the
// header with the final data-end offset, and finish with the file checksum.
// The ordering matters: the header can only be written once the index start
// is known, and the buffer must be flushed before seeking on p.w.
func (p *segmentCleanerReplace) do(shouldAbort cyclemanager.ShouldAbortCallback) error {
	if err := p.init(); err != nil {
		return fmt.Errorf("init: %w", err)
	}

	segmentFile := segmentindex.NewSegmentFile(
		segmentindex.WithBufferedWriter(p.bufw),
		segmentindex.WithChecksumsDisabled(!p.enableChecksumValidation),
	)

	indexKeys, err := p.writeKeys(segmentFile, shouldAbort)
	if err != nil {
		return fmt.Errorf("write keys: %w", err)
	}

	if err := p.writeIndexes(segmentFile, indexKeys); err != nil {
		return fmt.Errorf("write indices: %w", err)
	}

	// flush buffered, so we can safely seek on underlying writer
	if err := p.bufw.Flush(); err != nil {
		return fmt.Errorf("flush buffered: %w", err)
	}

	// the data section ends where the last value ends; with no surviving keys
	// it is empty and the index starts right after the header
	var dataEnd uint64 = segmentindex.HeaderSize
	if l := len(indexKeys); l > 0 {
		dataEnd = uint64(indexKeys[l-1].ValueEnd)
	}

	if err := p.writeHeader(segmentFile, dataEnd); err != nil {
		return fmt.Errorf("write header: %w", err)
	}

	if _, err := segmentFile.WriteChecksum(); err != nil {
		return fmt.Errorf("write compactorSet segment checksum: %w", err)
	}

	return nil
}
|
| 96 |
+
|
| 97 |
+
func (p *segmentCleanerReplace) init() error {
|
| 98 |
+
// write a dummy header as its contents are not known yet.
|
| 99 |
+
// file will be sought to the beginning and overwritten with actual header
|
| 100 |
+
// at the very end
|
| 101 |
+
|
| 102 |
+
if _, err := p.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil {
|
| 103 |
+
return fmt.Errorf("write empty header: %w", err)
|
| 104 |
+
}
|
| 105 |
+
return nil
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
// writeKeys streams every node of the source segment into the new segment
// body, skipping nodes whose key has a newer version on an upper segment.
// It returns the index keys (with byte offsets) for all nodes that were
// written, to be used for building the segment indexes afterwards.
//
// Error-sentinel semantics of the cursor: lsmkv.Deleted marks a tombstoned
// node that must still be copied (the loop condition keeps iterating on it),
// while lsmkv.NotFound marks normal exhaustion of the cursor. Any other
// error terminates the loop and is returned.
func (p *segmentCleanerReplace) writeKeys(f *segmentindex.SegmentFile,
	shouldAbort cyclemanager.ShouldAbortCallback,
) ([]segmentindex.Key, error) {
	// the (dummy) header was already written, this is our initial offset
	offset := segmentindex.HeaderSize

	var indexKeys []segmentindex.Key
	var indexKey segmentindex.Key
	var node segmentReplaceNode
	var err error
	var keyExists bool

	i := 0
	for node, err = p.cursor.firstWithAllKeys(); err == nil || errors.Is(err, lsmkv.Deleted); node, err = p.cursor.nextWithAllKeys() {
		i++
		// check the abort callback only every 100 nodes to keep it cheap
		if i%100 == 0 && shouldAbort() {
			return nil, fmt.Errorf("should abort requested")
		}

		keyExists, err = p.keyExistsFn(node.primaryKey)
		if err != nil {
			break
		}
		if keyExists {
			// a newer segment owns this key — drop the stale node
			continue
		}
		// work on a copy so the cursor's reused node is not mutated
		nodeCopy := node
		nodeCopy.offset = offset
		indexKey, err = nodeCopy.KeyIndexAndWriteTo(f.BodyWriter())
		if err != nil {
			break
		}
		offset = indexKey.ValueEnd
		indexKeys = append(indexKeys, indexKey)
	}

	// lsmkv.NotFound signals the cursor is exhausted — the success path.
	// Anything else is a real failure.
	if !errors.Is(err, lsmkv.NotFound) {
		return nil, err
	}
	return indexKeys, nil
}
|
| 149 |
+
|
| 150 |
+
// writeIndexes serializes the primary (and any secondary) key indexes for the
// cleaned segment and appends them after the data section, reporting the
// write volume to the file-IO metrics.
func (p *segmentCleanerReplace) writeIndexes(f *segmentindex.SegmentFile,
	keys []segmentindex.Key,
) error {
	indexes := &segmentindex.Indexes{
		Keys:                keys,
		SecondaryIndexCount: p.secondaryIndexCount,
		ScratchSpacePath:    p.scratchSpacePath,
		ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{
			"strategy":  StrategyReplace,
			"operation": "cleanupWriteIndices",
		}),
	}
	_, err := f.WriteIndexes(indexes, math.MaxInt64) // segment cleaner only runs for big files
	return err
}
|
| 165 |
+
|
| 166 |
+
// writeHeader assumes that everything has been written to the underlying
// writer and it is now safe to seek to the beginning and override the initial
// header. startOfIndex is the byte offset where the data section ends and the
// index section begins. Afterwards it seeks back to the end and resets the
// buffered writer so subsequent writes (the checksum) append correctly.
func (p *segmentCleanerReplace) writeHeader(f *segmentindex.SegmentFile,
	startOfIndex uint64,
) error {
	if _, err := p.w.Seek(0, io.SeekStart); err != nil {
		return fmt.Errorf("seek to beginning to write header: %w", err)
	}

	h := &segmentindex.Header{
		Level:            p.level,
		Version:          p.version,
		SecondaryIndices: p.secondaryIndexCount,
		Strategy:         segmentindex.StrategyReplace,
		IndexStart:       startOfIndex,
	}
	// We have to write directly to compactor writer,
	// since it has seeked back to start. The following
	// call to f.WriteHeader will not write again.
	if _, err := h.WriteTo(p.w); err != nil {
		return err
	}

	// registers the header with the segment file (e.g. for checksumming)
	// without re-emitting its bytes
	if _, err := f.WriteHeader(h); err != nil {
		return err
	}

	if _, err := p.w.Seek(0, io.SeekEnd); err != nil {
		return fmt.Errorf("seek to end after writing header: %w", err)
	}

	// discard any stale buffered state now that the underlying writer moved
	p.bufw.Reset(p.w)

	return nil
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_collection_strategy.go
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
"fmt"
|
| 17 |
+
|
| 18 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 19 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
// getCollection looks up key in this segment and returns the decoded list of
// values. Only valid for the set, map, and inverted strategies; the bloom
// filter (if enabled) is consulted first to short-circuit misses. Returns
// lsmkv.NotFound when the key is absent.
func (s *segment) getCollection(key []byte) ([]value, error) {
	if s.strategy != segmentindex.StrategySetCollection &&
		s.strategy != segmentindex.StrategyMapCollection &&
		s.strategy != segmentindex.StrategyInverted {
		return nil, fmt.Errorf("get only possible for strategies %q, %q and %q, got %q",
			StrategySetCollection, StrategyMapCollection, StrategyInverted, s.strategy)
	}

	// a negative bloom test guarantees the key is not present
	if s.useBloomFilter && !s.bloomFilter.Test(key) {
		return nil, lsmkv.NotFound
	}

	node, err := s.index.Get(key)
	if err != nil {
		return nil, err
	}

	// We need to copy the data we read from the segment exactly once in this
	// place. This means that future processing can share this memory as much as
	// it wants to, as it can now be considered immutable. If we didn't copy in
	// this place it would only be safe to hold this data while still under the
	// protection of the segmentGroup.maintenanceLock. This lock makes sure that
	// no compaction is started during an ongoing read. However, as we could show
	// as part of https://github.com/weaviate/weaviate/issues/1837
	// further processing, such as map-decoding and eventually map-merging would
	// happen inside the bucket.MapList() method. This scope has its own lock,
	// but that lock can only protecting against flushing (i.e. changing the
	// active/flushing memtable), not against removing the disk segment. If a
	// compaction completes and the old segment is removed, we would be accessing
	// invalid memory without the copy, thus leading to a SEGFAULT.
	contentsCopy := make([]byte, node.End-node.Start)
	if err = s.copyNode(contentsCopy, nodeOffset{node.Start, node.End}); err != nil {
		return nil, err
	}
	// the inverted strategy uses a block-based encoding that needs its own parser
	if s.strategy == segmentindex.StrategyInverted {
		return s.collectionStratParseDataInverted(contentsCopy)
	}

	return s.collectionStratParseData(contentsCopy)
}
|
| 62 |
+
|
| 63 |
+
func (s *segment) collectionStratParseData(in []byte) ([]value, error) {
|
| 64 |
+
if len(in) == 0 {
|
| 65 |
+
return nil, lsmkv.NotFound
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
offset := 0
|
| 69 |
+
|
| 70 |
+
valuesLen := binary.LittleEndian.Uint64(in[offset : offset+8])
|
| 71 |
+
offset += 8
|
| 72 |
+
|
| 73 |
+
values := make([]value, valuesLen)
|
| 74 |
+
valueIndex := 0
|
| 75 |
+
for valueIndex < int(valuesLen) {
|
| 76 |
+
values[valueIndex].tombstone = in[offset] == 0x01
|
| 77 |
+
offset += 1
|
| 78 |
+
|
| 79 |
+
valueLen := binary.LittleEndian.Uint64(in[offset : offset+8])
|
| 80 |
+
offset += 8
|
| 81 |
+
|
| 82 |
+
values[valueIndex].value = in[offset : offset+int(valueLen)]
|
| 83 |
+
offset += int(valueLen)
|
| 84 |
+
|
| 85 |
+
valueIndex++
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
return values, nil
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
func (s *segment) collectionStratParseDataInverted(in []byte) ([]value, error) {
|
| 92 |
+
if len(in) == 0 {
|
| 93 |
+
return nil, lsmkv.NotFound
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
offset := 0
|
| 97 |
+
|
| 98 |
+
valuesLen := binary.LittleEndian.Uint64(in[offset : offset+8])
|
| 99 |
+
// offset += 8
|
| 100 |
+
|
| 101 |
+
values := make([]value, valuesLen)
|
| 102 |
+
|
| 103 |
+
nodes, _ := decodeAndConvertFromBlocks(in)
|
| 104 |
+
|
| 105 |
+
valueIndex := 0
|
| 106 |
+
for _, node := range nodes {
|
| 107 |
+
buf := make([]byte, 16)
|
| 108 |
+
copy(buf, node.Key)
|
| 109 |
+
copy(buf[8:], node.Value)
|
| 110 |
+
values[valueIndex].tombstone = node.Tombstone
|
| 111 |
+
values[valueIndex].value = buf
|
| 112 |
+
|
| 113 |
+
valueIndex++
|
| 114 |
+
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
return values, nil
|
| 118 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group.go
ADDED
|
@@ -0,0 +1,1052 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"context"
|
| 16 |
+
"errors"
|
| 17 |
+
"fmt"
|
| 18 |
+
"io/fs"
|
| 19 |
+
"os"
|
| 20 |
+
"path/filepath"
|
| 21 |
+
"sort"
|
| 22 |
+
"strings"
|
| 23 |
+
"sync"
|
| 24 |
+
"time"
|
| 25 |
+
|
| 26 |
+
"github.com/sirupsen/logrus"
|
| 27 |
+
|
| 28 |
+
"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
|
| 29 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
|
| 30 |
+
"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
|
| 31 |
+
"github.com/weaviate/weaviate/entities/cyclemanager"
|
| 32 |
+
"github.com/weaviate/weaviate/entities/diskio"
|
| 33 |
+
"github.com/weaviate/weaviate/entities/lsmkv"
|
| 34 |
+
"github.com/weaviate/weaviate/entities/models"
|
| 35 |
+
"github.com/weaviate/weaviate/entities/schema"
|
| 36 |
+
"github.com/weaviate/weaviate/entities/storagestate"
|
| 37 |
+
"github.com/weaviate/weaviate/usecases/memwatch"
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
// SegmentGroup manages the ordered collection of on-disk segments that back a
// single bucket, including the locking around segment replacement, cursor
// lifecycle, compaction, and cleanup scheduling.
type SegmentGroup struct {
	segments []Segment

	// Lock() for changing the currently active segments, RLock() for normal
	// operation
	maintenanceLock sync.RWMutex
	dir             string

	// cursorsLock guards activeCursors and enqueuedSegments: segments whose
	// replacement must wait while cursors are open are enqueued instead
	cursorsLock      sync.RWMutex
	activeCursors    int
	enqueuedSegments []Segment

	// flushVsCompactLock is a simple synchronization mechanism between the
	// compaction and flush cycle. In general, those are independent, however,
	// there are parts of it that are not. See the comments of the routines
	// interacting with this lock for more details.
	flushVsCompactLock sync.Mutex

	strategy string

	compactionCallbackCtrl cyclemanager.CycleCallbackCtrl

	logger logrus.FieldLogger

	// for backward-compatibility with states where the disk state for maps was
	// not guaranteed to be sorted yet
	mapRequiresSorting bool

	status     storagestate.Status
	statusLock sync.Mutex // guards status
	metrics    *Metrics

	// all "replace" buckets support counting through net additions, but not all
	// produce a meaningful count. Typically, the only count we're interested in
	// is that of the bucket that holds objects
	monitorCount bool

	mmapContents             bool
	keepTombstones           bool // see bucket for more details
	useBloomFilter           bool // see bucket for more details
	calcCountNetAdditions    bool // see bucket for more details
	compactLeftOverSegments  bool // see bucket for more details
	enableChecksumValidation bool
	MinMMapSize              int64
	keepLevelCompaction      bool // see bucket for more details

	allocChecker   memwatch.AllocChecker
	maxSegmentSize int64

	segmentCleaner     segmentCleaner
	cleanupInterval    time.Duration
	lastCleanupCall    time.Time
	lastCompactionCall time.Time

	roaringSetRangeSegmentInMemory *roaringsetrange.SegmentInMemory
	bitmapBufPool                  roaringset.BitmapBufPool
	bm25config                     *schema.BM25Config
	writeSegmentInfoIntoFileName   bool
	writeMetadata                  bool
}
|
| 100 |
+
|
| 101 |
+
// sgConfig carries the construction-time options for a SegmentGroup.
// Most flags mirror fields on SegmentGroup / Bucket; see the bucket for the
// authoritative documentation of each option.
type sgConfig struct {
	dir                          string
	strategy                     string
	mapRequiresSorting           bool
	monitorCount                 bool
	mmapContents                 bool
	keepTombstones               bool
	useBloomFilter               bool
	calcCountNetAdditions        bool
	forceCompaction              bool
	keepLevelCompaction          bool
	maxSegmentSize               int64
	cleanupInterval              time.Duration
	enableChecksumValidation     bool
	// keepSegmentsInMemory enables the in-memory mirror for the
	// RoaringSetRange strategy (see newSegmentGroup).
	keepSegmentsInMemory         bool
	MinMMapSize                  int64
	bm25config                   *models.BM25Config
	writeSegmentInfoIntoFileName bool
	writeMetadata                bool
}
|
| 121 |
+
|
| 122 |
+
// newSegmentGroup initializes a SegmentGroup from the files present in
// cfg.dir, recovering from interrupted compactions and flushes along the way.
//
// Recovery/load order (also summarized inline below):
//  1. process .tmp compaction leftovers first,
//  2. load .db segment files,
//  3. discard a .db file if a matching .wal exists (flush never finished),
//  4. WAL recovery itself happens later via mayRecoverFromCommitLogs.
//
// It may also adjust the bucket's strategy based on what is actually stored
// on disk (legacy SetCollection/MapCollection vs RoaringSet/Inverted).
func newSegmentGroup(ctx context.Context, logger logrus.FieldLogger, metrics *Metrics, cfg sgConfig,
	compactionCallbacks cyclemanager.CycleCallbackGroup, b *Bucket, files map[string]int64,
) (*SegmentGroup, error) {
	now := time.Now()
	// NOTE(review): cfg.keepLevelCompaction is never copied into sg here —
	// presumably set elsewhere or unused by the group; verify against callers.
	sg := &SegmentGroup{
		segments:                     make([]Segment, len(files)),
		dir:                          cfg.dir,
		logger:                       logger,
		metrics:                      metrics,
		monitorCount:                 cfg.monitorCount,
		mapRequiresSorting:           cfg.mapRequiresSorting,
		strategy:                     cfg.strategy,
		mmapContents:                 cfg.mmapContents,
		keepTombstones:               cfg.keepTombstones,
		useBloomFilter:               cfg.useBloomFilter,
		calcCountNetAdditions:        cfg.calcCountNetAdditions,
		compactLeftOverSegments:      cfg.forceCompaction,
		maxSegmentSize:               cfg.maxSegmentSize,
		cleanupInterval:              cfg.cleanupInterval,
		enableChecksumValidation:     cfg.enableChecksumValidation,
		allocChecker:                 b.allocChecker,
		lastCompactionCall:           now,
		lastCleanupCall:              now,
		MinMMapSize:                  cfg.MinMMapSize,
		writeSegmentInfoIntoFileName: cfg.writeSegmentInfoIntoFileName,
		writeMetadata:                cfg.writeMetadata,
		bitmapBufPool:                b.bitmapBufPool,
	}

	segmentIndex := 0

	segmentsAlreadyRecoveredFromCompaction := make(map[string]struct{})

	// Note: it's important to process first the compacted segments
	// TODO: a single iteration may be possible

	for entry := range files {
		if filepath.Ext(entry) != ".tmp" {
			continue
		}

		potentialCompactedSegmentFileName := strings.TrimSuffix(entry, ".tmp")

		if filepath.Ext(potentialCompactedSegmentFileName) != ".db" {
			// another kind of temporal file, ignore at this point but it may need to be deleted...
			continue
		}

		jointSegments := segmentID(potentialCompactedSegmentFileName)
		jointSegmentsIDs := strings.Split(jointSegments, "_")

		if len(jointSegmentsIDs) == 1 {
			// cleanup leftover, to be removed
			if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil {
				return nil, fmt.Errorf("delete partially cleaned segment %q: %w", entry, err)
			}
			continue
		}

		if len(jointSegmentsIDs) != 2 {
			logger.WithField("action", "lsm_segment_init").
				WithField("path", filepath.Join(sg.dir, entry)).
				Warn("ignored (partially written) LSM compacted segment generated with a version older than v1.24.0")

			continue
		}

		// jointSegmentsIDs[0] is the left segment, jointSegmentsIDs[1] is the right segment
		leftSegmentFound, _ := segmentExistsWithID(jointSegmentsIDs[0], files)
		rightSegmentFound, rightSegmentFilename := segmentExistsWithID(jointSegmentsIDs[1], files)

		rightSegmentPath := filepath.Join(sg.dir, rightSegmentFilename)

		// both originals still exist: the compaction never completed, the
		// .tmp result is discarded and compaction will simply run again
		if leftSegmentFound && rightSegmentFound {
			delete(files, entry)
			if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil {
				return nil, fmt.Errorf("delete partially compacted segment %q: %w", entry, err)
			}
			continue
		}

		if leftSegmentFound && !rightSegmentFound {
			return nil, fmt.Errorf("missing right segment %q", rightSegmentFilename)
		}

		var rightSegmentMetadata *struct {
			Level    uint16
			Strategy segmentindex.Strategy
		}
		if !leftSegmentFound && rightSegmentFound {
			// segment is initialized just to be erased
			// there is no need of bloom filters nor net addition counter re-calculation
			rightSegment, err := newSegment(rightSegmentPath, logger,
				metrics, sg.makeExistsOn(nil),
				segmentConfig{
					mmapContents:             sg.mmapContents,
					useBloomFilter:           sg.useBloomFilter,
					calcCountNetAdditions:    sg.calcCountNetAdditions,
					overwriteDerived:         false,
					enableChecksumValidation: sg.enableChecksumValidation,
					MinMMapSize:              sg.MinMMapSize,
					allocChecker:             sg.allocChecker,
					fileList:                 make(map[string]int64), // empty to not check if bloom/cna files already exist
					writeMetadata:            sg.writeMetadata,
				})
			if err != nil {
				return nil, fmt.Errorf("init already compacted right segment %s: %w", rightSegmentFilename, err)
			}

			// remember level/strategy so the recovered .tmp file can be
			// renamed with matching extra info below
			rightSegmentMetadata = &struct {
				Level    uint16
				Strategy segmentindex.Strategy
			}{
				Level:    rightSegment.getLevel(),
				Strategy: rightSegment.getStrategy(),
			}

			err = rightSegment.close()
			if err != nil {
				return nil, fmt.Errorf("close already compacted right segment %s: %w", rightSegmentFilename, err)
			}

			// https://github.com/weaviate/weaviate/pull/6128 introduces the ability
			// to drop segments delayed by renaming them first and then dropping them
			// later.
			//
			// The existing functionality (previously .drop) was renamed to
			// .dropImmediately. We are keeping the old behavior in this mainly for
			// backward compatbility, but also because the motivation behind the
			// delayed deletion does not apply here:
			//
			// The new behavior is meant to split the deletion into two steps, to
			// reduce the time that an expensive lock – which could block readers -
			// is held. In this scenario, the segment has not been initialized yet,
			// so there is no one we could be blocking.
			//
			// The total time is the same, so we can also just drop it immediately.
			err = rightSegment.dropImmediately()
			if err != nil {
				return nil, fmt.Errorf("delete already compacted right segment %s: %w", rightSegmentFilename, err)
			}
			delete(files, rightSegmentFilename)

			err = diskio.Fsync(sg.dir)
			if err != nil {
				return nil, fmt.Errorf("fsync segment directory %s: %w", sg.dir, err)
			}
		}

		// the compacted .tmp file replaces the right segment under its final name
		var newRightSegmentFileName string
		if cfg.writeSegmentInfoIntoFileName && rightSegmentMetadata != nil {
			newRightSegmentFileName = fmt.Sprintf("segment-%s%s.db", jointSegmentsIDs[1], segmentExtraInfo(rightSegmentMetadata.Level, rightSegmentMetadata.Strategy))
		} else {
			newRightSegmentFileName = fmt.Sprintf("segment-%s.db", jointSegmentsIDs[1])
		}
		newRightSegmentPath := filepath.Join(sg.dir, newRightSegmentFileName)

		if err := os.Rename(filepath.Join(sg.dir, entry), newRightSegmentPath); err != nil {
			return nil, fmt.Errorf("rename compacted segment file %q as %q: %w", entry, newRightSegmentFileName, err)
		}

		var segment Segment
		var err error
		sgConf := segmentConfig{
			mmapContents:             sg.mmapContents,
			useBloomFilter:           sg.useBloomFilter,
			calcCountNetAdditions:    sg.calcCountNetAdditions,
			overwriteDerived:         true,
			enableChecksumValidation: sg.enableChecksumValidation,
			MinMMapSize:              sg.MinMMapSize,
			allocChecker:             sg.allocChecker,
			fileList:                 files,
			writeMetadata:            sg.writeMetadata,
		}
		if b.lazySegmentLoading {
			segment, err = newLazySegment(newRightSegmentPath, logger,
				metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), sgConf,
			)
			if err != nil {
				return nil, fmt.Errorf("init lazy segment %s: %w", newRightSegmentFileName, err)
			}
		} else {
			segment, err = newSegment(newRightSegmentPath, logger,
				metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), sgConf,
			)
			if err != nil {
				return nil, fmt.Errorf("init segment %s: %w", newRightSegmentFileName, err)
			}
		}

		sg.segments[segmentIndex] = segment
		segmentIndex++

		segmentsAlreadyRecoveredFromCompaction[newRightSegmentFileName] = struct{}{}
	}

	for entry := range files {
		if filepath.Ext(entry) == DeleteMarkerSuffix {
			// marked for deletion, but never actually deleted. Delete now.
			if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil {
				// don't abort if the delete fails, we can still continue (albeit
				// without freeing disk space that should have been freed)
				sg.logger.WithError(err).WithFields(logrus.Fields{
					"action": "lsm_segment_init_deleted_previously_marked_files",
					"file":   entry,
				}).Error("failed to delete file already marked for deletion")
			}
			continue

		}

		if filepath.Ext(entry) != ".db" {
			// skip, this could be commit log, etc.
			continue
		}

		_, alreadyRecoveredFromCompaction := segmentsAlreadyRecoveredFromCompaction[entry]
		if alreadyRecoveredFromCompaction {
			// the .db file was already removed and restored from a compacted segment
			continue
		}

		// before we can mount this file, we need to check if a WAL exists for it.
		// If yes, we must assume that the flush never finished, as otherwise the
		// WAL would have been deleted. Thus we must remove it.
		walFileName, _, _ := strings.Cut(entry, ".")
		walFileName += ".wal"
		_, ok := files[walFileName]
		if ok {
			// the segment will be recovered from the WAL
			err := os.Remove(filepath.Join(sg.dir, entry))
			if err != nil {
				return nil, fmt.Errorf("delete partially written segment %s: %w", entry, err)
			}

			logger.WithField("action", "lsm_segment_init").
				WithField("path", filepath.Join(sg.dir, entry)).
				WithField("wal_path", walFileName).
				Info("discarded (partially written) LSM segment, because an active WAL for " +
					"the same segment was found. A recovery from the WAL will follow.")

			continue
		}

		var segment Segment
		segConf := segmentConfig{
			mmapContents:             sg.mmapContents,
			useBloomFilter:           sg.useBloomFilter,
			calcCountNetAdditions:    sg.calcCountNetAdditions,
			overwriteDerived:         false,
			enableChecksumValidation: sg.enableChecksumValidation,
			MinMMapSize:              sg.MinMMapSize,
			allocChecker:             sg.allocChecker,
			fileList:                 files,
			writeMetadata:            sg.writeMetadata,
		}
		var err error
		if b.lazySegmentLoading {
			segment, err = newLazySegment(filepath.Join(sg.dir, entry), logger,
				metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), segConf,
			)
			if err != nil {
				return nil, fmt.Errorf("init lazy segment %s: %w", filepath.Join(sg.dir, entry), err)
			}
		} else {
			segment, err = newSegment(filepath.Join(sg.dir, entry), logger,
				metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), segConf,
			)
			if err != nil {
				return nil, fmt.Errorf("init segment %s: %w", filepath.Join(sg.dir, entry), err)
			}
		}
		sg.segments[segmentIndex] = segment
		segmentIndex++
	}

	// shrink to the number of segments actually mounted (files may have
	// contained WALs, markers, etc. that did not produce a segment)
	sg.segments = sg.segments[:segmentIndex]

	// segment load order is as follows:
	// - find .tmp files and recover them first
	// - find .db files and load them
	// - if there is a .wal file exists for a .db, remove the .db file
	// - find .wal files and load them into a memtable
	// - flush the memtable to a segment file
	// Thus, files may be loaded in a different order than they were created,
	// and we need to re-sort them to ensure the order is correct, as compations
	// and other operations are based on the creation order of the segments
	sort.Slice(sg.segments, func(i, j int) bool {
		return sg.segments[i].getPath() < sg.segments[j].getPath()
	})

	// Actual strategy is stored in segment files. In case it is SetCollection,
	// while new implementation uses bitmaps and supposed to be RoaringSet,
	// bucket and segmentgroup strategy is changed back to SetCollection
	// (memtables will be created later on, with already modified strategy)
	// TODO what if only WAL files exists, and there is no segment to get actual strategy?
	if b.strategy == StrategyRoaringSet && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategySetCollection {
		b.strategy = StrategySetCollection
		b.desiredStrategy = StrategyRoaringSet
		sg.strategy = StrategySetCollection
	}
	// As of v1.19 property's IndexInterval setting is replaced with
	// IndexFilterable (roaring set) + IndexSearchable (map) and enabled by default.
	// Buckets for text/text[] inverted indexes created before 1.19 have strategy
	// map and name that since 1.19 is used by filterable indeverted index.
	// Those buckets (roaring set by configuration, but in fact map) have to be
	// renamed on startup by migrator. Here actual strategy is set based on
	// data found in segment files
	if b.strategy == StrategyRoaringSet && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection {
		b.strategy = StrategyMapCollection
		b.desiredStrategy = StrategyRoaringSet
		sg.strategy = StrategyMapCollection
	}

	// Inverted segments share a lot of their logic as the MapCollection,
	// and the main difference is in the way they store their data.
	// Setting the desired strategy to Inverted will make sure that we can
	// distinguish between the two strategies for search.
	// The changes only apply when we have segments on disk,
	// as the memtables will always be created with the MapCollection strategy.
	if b.strategy == StrategyInverted && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection {
		b.strategy = StrategyMapCollection
		b.desiredStrategy = StrategyInverted
		sg.strategy = StrategyMapCollection
	} else if b.strategy == StrategyMapCollection && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyInverted {
		// TODO amourao: blockmax "else" to be removed before final release
		// in case bucket was created as inverted and default strategy was reverted to map
		// by unsetting corresponding env variable
		b.strategy = StrategyInverted
		b.desiredStrategy = StrategyMapCollection
		sg.strategy = StrategyInverted
	}

	if err := b.mayRecoverFromCommitLogs(ctx, sg, files); err != nil {
		return nil, err
	}

	if sg.monitorCount {
		sg.metrics.ObjectCount(sg.count())
	}

	sc, err := newSegmentCleaner(sg)
	if err != nil {
		return nil, err
	}
	sg.segmentCleaner = sc

	// if a segment exists of the map collection strategy, we need to
	// convert the inverted strategy to a map collection strategy
	// as it is done on the bucket level
	if sg.strategy == StrategyInverted && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection {
		sg.strategy = StrategyMapCollection
	}

	switch sg.strategy {
	case StrategyInverted:
		// start with last but one segment, as the last one doesn't need tombstones for now
		for i := len(sg.segments) - 2; i >= 0; i-- {
			// avoid crashing if segment has no tombstones
			tombstonesNext, err := sg.segments[i+1].ReadOnlyTombstones()
			if err != nil {
				return nil, fmt.Errorf("init segment %s: load tombstones %w", sg.segments[i+1].getPath(), err)
			}
			if _, err := sg.segments[i].MergeTombstones(tombstonesNext); err != nil {
				return nil, fmt.Errorf("init segment %s: merge tombstones %w", sg.segments[i].getPath(), err)
			}
		}

	case StrategyRoaringSetRange:
		if cfg.keepSegmentsInMemory {
			t := time.Now()
			sg.roaringSetRangeSegmentInMemory = roaringsetrange.NewSegmentInMemory()
			for _, seg := range sg.segments {
				cursor := seg.newRoaringSetRangeCursor()
				if err := sg.roaringSetRangeSegmentInMemory.MergeSegmentByCursor(cursor); err != nil {
					return nil, fmt.Errorf("build segment-in-memory of strategy '%s': %w", sg.strategy, err)
				}
			}
			logger.WithFields(logrus.Fields{
				"took":    time.Since(t).String(),
				"bucket":  filepath.Base(cfg.dir),
				"size_mb": fmt.Sprintf("%.3f", float64(sg.roaringSetRangeSegmentInMemory.Size())/1024/1024),
			}).Debug("rangeable segment-in-memory built")
		}
	}

	id := "segmentgroup/compaction/" + sg.dir
	sg.compactionCallbackCtrl = compactionCallbacks.Register(id, sg.compactOrCleanup)

	return sg, nil
}
|
| 518 |
+
|
| 519 |
+
func (sg *SegmentGroup) makeExistsOn(segments []Segment) existsOnLowerSegmentsFn {
|
| 520 |
+
return func(key []byte) (bool, error) {
|
| 521 |
+
if len(segments) == 0 {
|
| 522 |
+
// this is already the lowest possible segment, we can guarantee that
|
| 523 |
+
// any key in this segment is previously unseen.
|
| 524 |
+
return false, nil
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
v, err := sg.getWithUpperSegmentBoundary(key, segments)
|
| 528 |
+
if err != nil {
|
| 529 |
+
return false, fmt.Errorf("check exists on segments: %w", err)
|
| 530 |
+
}
|
| 531 |
+
|
| 532 |
+
return v != nil, nil
|
| 533 |
+
}
|
| 534 |
+
}
|
| 535 |
+
|
| 536 |
+
// add mounts the segment file at path and appends it to the group as the
// newest segment. It takes the maintenance write lock, so it must not be
// called while holding any of the group's read locks.
func (sg *SegmentGroup) add(path string) error {
	sg.maintenanceLock.Lock()
	defer sg.maintenanceLock.Unlock()

	// overwriteDerived is true: bloom filters / CNA files are (re)computed
	// for the freshly added segment
	segment, err := newSegment(path, sg.logger,
		sg.metrics, sg.makeExistsOn(sg.segments),
		segmentConfig{
			mmapContents:             sg.mmapContents,
			useBloomFilter:           sg.useBloomFilter,
			calcCountNetAdditions:    sg.calcCountNetAdditions,
			overwriteDerived:         true,
			enableChecksumValidation: sg.enableChecksumValidation,
			MinMMapSize:              sg.MinMMapSize,
			allocChecker:             sg.allocChecker,
			writeMetadata:            sg.writeMetadata,
		})
	if err != nil {
		return fmt.Errorf("init segment %s: %w", path, err)
	}

	sg.segments = append(sg.segments, segment)
	return nil
}
|
| 559 |
+
|
| 560 |
+
func (sg *SegmentGroup) getAndLockSegments() (segments []Segment, release func()) {
|
| 561 |
+
sg.cursorsLock.RLock()
|
| 562 |
+
sg.maintenanceLock.RLock()
|
| 563 |
+
|
| 564 |
+
if len(sg.enqueuedSegments) == 0 {
|
| 565 |
+
return sg.segments, func() {
|
| 566 |
+
sg.cursorsLock.RUnlock()
|
| 567 |
+
sg.maintenanceLock.RUnlock()
|
| 568 |
+
}
|
| 569 |
+
}
|
| 570 |
+
|
| 571 |
+
segments = make([]Segment, 0, len(sg.segments)+len(sg.enqueuedSegments))
|
| 572 |
+
|
| 573 |
+
segments = append(segments, sg.segments...)
|
| 574 |
+
segments = append(segments, sg.enqueuedSegments...)
|
| 575 |
+
|
| 576 |
+
return segments, func() {
|
| 577 |
+
sg.cursorsLock.RUnlock()
|
| 578 |
+
sg.maintenanceLock.RUnlock()
|
| 579 |
+
}
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
// addInitializedSegment appends an already-initialized segment to the group.
// If cursors are currently open, the segment is parked in enqueuedSegments
// (picked up by getAndLockSegments) instead of mutating the active slice,
// so open cursors keep a stable view.
func (sg *SegmentGroup) addInitializedSegment(segment *segment) error {
	sg.cursorsLock.Lock()
	defer sg.cursorsLock.Unlock()

	if sg.activeCursors > 0 {
		sg.enqueuedSegments = append(sg.enqueuedSegments, segment)
		return nil
	}

	// lock order: cursorsLock before maintenanceLock — same order as
	// getAndLockSegments, which avoids deadlock between the two paths
	sg.maintenanceLock.Lock()
	defer sg.maintenanceLock.Unlock()

	sg.segments = append(sg.segments, segment)
	return nil
}
|
| 597 |
+
|
| 598 |
+
func (sg *SegmentGroup) get(key []byte) ([]byte, error) {
|
| 599 |
+
beforeMaintenanceLock := time.Now()
|
| 600 |
+
segments, release := sg.getAndLockSegments()
|
| 601 |
+
defer release()
|
| 602 |
+
|
| 603 |
+
if time.Since(beforeMaintenanceLock) > 100*time.Millisecond {
|
| 604 |
+
sg.logger.WithField("duration", time.Since(beforeMaintenanceLock)).
|
| 605 |
+
WithField("action", "lsm_segment_group_get_obtain_maintenance_lock").
|
| 606 |
+
Debug("waited over 100ms to obtain maintenance lock in segment group get()")
|
| 607 |
+
}
|
| 608 |
+
|
| 609 |
+
return sg.getWithUpperSegmentBoundary(key, segments)
|
| 610 |
+
}
|
| 611 |
+
|
| 612 |
+
// not thread-safe on its own, as the assumption is that this is called from a
// lockholder, e.g. within .get()
//
// getWithUpperSegmentBoundary searches the given segments newest-first and
// returns the first value found for key. It returns (nil, nil) both when the
// key is absent everywhere and when the newest entry is a tombstone
// (lsmkv.Deleted). Any other segment error is treated as a programming error
// and panics — the same convention as the sibling lookup helpers.
func (sg *SegmentGroup) getWithUpperSegmentBoundary(key []byte, segments []Segment) ([]byte, error) {
	// assumes "replace" strategy

	// start with latest and exit as soon as something is found, thus making sure
	// the latest takes presence
	for i := len(segments) - 1; i >= 0; i-- {
		beforeSegment := time.Now()
		v, err := segments[i].get(key)
		// surface unusually slow individual segment reads for debugging
		if time.Since(beforeSegment) > 100*time.Millisecond {
			sg.logger.WithField("duration", time.Since(beforeSegment)).
				WithField("action", "lsm_segment_group_get_individual_segment").
				WithError(err).
				WithField("segment_pos", i).
				Debug("waited over 100ms to get result from individual segment")
		}
		if err != nil {
			if errors.Is(err, lsmkv.NotFound) {
				// not in this segment, keep looking in older ones
				continue
			}

			if errors.Is(err, lsmkv.Deleted) {
				// newest state for this key is a tombstone
				return nil, nil
			}

			panic(fmt.Sprintf("unsupported error in segmentGroup.get(): %v", err))
		}

		return v, nil
	}

	return nil, nil
}
|
| 646 |
+
|
| 647 |
+
// getErrDeleted behaves like get, but distinguishes "deleted" from "never
// existed": a tombstone yields lsmkv.Deleted, a missing key yields
// lsmkv.NotFound (see getWithUpperSegmentBoundaryErrDeleted).
func (sg *SegmentGroup) getErrDeleted(key []byte) ([]byte, error) {
	segments, release := sg.getAndLockSegments()
	defer release()

	return sg.getWithUpperSegmentBoundaryErrDeleted(key, segments)
}
|
| 653 |
+
|
| 654 |
+
// getWithUpperSegmentBoundaryErrDeleted searches segments newest-first like
// getWithUpperSegmentBoundary, but propagates lsmkv.Deleted for tombstones
// and returns lsmkv.NotFound when no segment contains the key. Not
// thread-safe on its own; the caller must hold the group's read locks.
func (sg *SegmentGroup) getWithUpperSegmentBoundaryErrDeleted(key []byte, segments []Segment) ([]byte, error) {
	// assumes "replace" strategy

	// start with latest and exit as soon as something is found, thus making sure
	// the latest takes presence
	for i := len(segments) - 1; i >= 0; i-- {
		v, err := segments[i].get(key)
		if err != nil {
			if errors.Is(err, lsmkv.NotFound) {
				continue
			}

			if errors.Is(err, lsmkv.Deleted) {
				// unlike getWithUpperSegmentBoundary, surface the tombstone
				return nil, err
			}

			panic(fmt.Sprintf("unsupported error in segmentGroup.get(): %v", err))
		}

		return v, nil
	}

	return nil, lsmkv.NotFound
}
|
| 678 |
+
|
| 679 |
+
// getBySecondaryIntoMemory looks up key in the secondary index at position
// pos, newest segment first, copying the result into buffer where possible.
// It returns the primary key, the value, and the buffer actually used
// (possibly a fresh allocation if the provided one was too small). All three
// are nil when the key is absent or deleted.
func (sg *SegmentGroup) getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error) {
	segments, release := sg.getAndLockSegments()
	defer release()

	// assumes "replace" strategy

	// start with latest and exit as soon as something is found, thus making sure
	// the latest takes presence
	for i := len(segments) - 1; i >= 0; i-- {
		k, v, allocatedBuff, err := segments[i].getBySecondaryIntoMemory(pos, key, buffer)
		if err != nil {
			if errors.Is(err, lsmkv.NotFound) {
				continue
			}

			if errors.Is(err, lsmkv.Deleted) {
				// tombstone: treated the same as "absent" by this API
				return nil, nil, nil, nil
			}

			panic(fmt.Sprintf("unsupported error in segmentGroup.get(): %v", err))
		}

		return k, v, allocatedBuff, nil
	}

	return nil, nil, nil, nil
}
|
| 706 |
+
|
| 707 |
+
func (sg *SegmentGroup) getCollection(key []byte) ([]value, error) {
|
| 708 |
+
segments, release := sg.getAndLockSegments()
|
| 709 |
+
defer release()
|
| 710 |
+
|
| 711 |
+
var out []value
|
| 712 |
+
|
| 713 |
+
// start with first and do not exit
|
| 714 |
+
for _, segment := range segments {
|
| 715 |
+
v, err := segment.getCollection(key)
|
| 716 |
+
if err != nil {
|
| 717 |
+
if errors.Is(err, lsmkv.NotFound) {
|
| 718 |
+
continue
|
| 719 |
+
}
|
| 720 |
+
|
| 721 |
+
return nil, err
|
| 722 |
+
}
|
| 723 |
+
|
| 724 |
+
if len(out) == 0 {
|
| 725 |
+
out = v
|
| 726 |
+
} else {
|
| 727 |
+
out = append(out, v...)
|
| 728 |
+
}
|
| 729 |
+
}
|
| 730 |
+
|
| 731 |
+
return out, nil
|
| 732 |
+
}
|
| 733 |
+
|
| 734 |
+
// getCollectionAndSegments returns, per segment that matched, the values for
// key alongside the segment they came from, plus a release func that drops
// the locks taken here. The caller must invoke release when done with the
// returned segments. Note: inverted segments are included even when the key
// is not found in them, because callers still need their tombstones.
func (sg *SegmentGroup) getCollectionAndSegments(key []byte) ([][]value, []Segment, func(), error) {
	segments, release := sg.getAndLockSegments()

	out := make([][]value, len(segments))
	outSegments := make([]Segment, len(segments))

	i := 0
	// start with first and do not exit
	for _, segment := range segments {
		v, err := segment.getCollection(key)
		if err != nil {
			if !errors.Is(err, lsmkv.NotFound) {
				// hard error: release locks before bailing out
				release()
				return nil, nil, func() {}, err
			}
			// inverted segments need to be loaded anyway, even if they don't have
			// the key, as we need to know if they have tombstones
			if segment.getStrategy() != segmentindex.StrategyInverted {
				continue
			}
			// inverted + NotFound falls through: v is nil but the segment is kept
		}

		out[i] = v
		outSegments[i] = segment
		i++
	}

	return out[:i], outSegments[:i], release, nil
}
|
| 763 |
+
|
| 764 |
+
// roaringSetGet returns the bitmap layers for key across all segments. The
// first segment that contains the key contributes a layer allocated from a
// pooled buffer (sized 25% larger to leave room for merges); every following
// segment is merged into that same layer in place. The returned release func
// must be called to return the pooled buffer; on error paths it is invoked
// internally and callers receive a no-op release.
func (sg *SegmentGroup) roaringSetGet(key []byte) (out roaringset.BitmapLayers, release func(), err error) {
	segments, sgRelease := sg.getAndLockSegments()
	defer sgRelease()

	ln := len(segments)
	if ln == 0 {
		return nil, noopRelease, nil
	}

	release = noopRelease
	// use bigger buffer for first layer, to make space for further merges
	// with following layers
	bitmapBufPool := roaringset.NewBitmapBufPoolFactorWrapper(sg.bitmapBufPool, 1.25)

	// phase 1: find the first segment that has the key; its layer becomes
	// the accumulator and its release func becomes ours
	i := 0
	for ; i < ln; i++ {
		layer, layerRelease, err := segments[i].roaringSetGet(key, bitmapBufPool)
		if err == nil {
			out = append(out, layer)
			release = layerRelease
			i++
			break
		}
		if !errors.Is(err, lsmkv.NotFound) {
			return nil, noopRelease, err
		}
	}
	// from here on, a pooled buffer may be held — make sure it is returned
	// if a later merge fails (err is the named return)
	defer func() {
		if err != nil {
			release()
		}
	}()

	// phase 2: merge all remaining segments into the first layer in place
	for ; i < ln; i++ {
		if err := segments[i].roaringSetMergeWith(key, out[0], sg.bitmapBufPool); err != nil {
			return nil, noopRelease, err
		}
	}

	return out, release, nil
}
|
| 805 |
+
|
| 806 |
+
func (sg *SegmentGroup) count() int {
|
| 807 |
+
segments, release := sg.getAndLockSegments()
|
| 808 |
+
defer release()
|
| 809 |
+
|
| 810 |
+
count := 0
|
| 811 |
+
for _, seg := range segments {
|
| 812 |
+
count += seg.getSegment().getCountNetAdditions()
|
| 813 |
+
}
|
| 814 |
+
|
| 815 |
+
return count
|
| 816 |
+
}
|
| 817 |
+
|
| 818 |
+
func (sg *SegmentGroup) Size() int64 {
|
| 819 |
+
segments, release := sg.getAndLockSegments()
|
| 820 |
+
defer release()
|
| 821 |
+
|
| 822 |
+
totalSize := int64(0)
|
| 823 |
+
for _, seg := range segments {
|
| 824 |
+
totalSize += int64(seg.getSize())
|
| 825 |
+
}
|
| 826 |
+
|
| 827 |
+
return totalSize
|
| 828 |
+
}
|
| 829 |
+
|
| 830 |
+
// MetadataSize returns the total size of metadata files (.bloom and .cna) from segments in memory
|
| 831 |
+
// MetadataSize returns the total size of metadata files for all segments.
|
| 832 |
+
// The calculation differs based on the writeMetadata setting:
|
| 833 |
+
//
|
| 834 |
+
// When writeMetadata is enabled:
|
| 835 |
+
// - Counts the actual file size of .metadata files on disk
|
| 836 |
+
// - Each .metadata file contains: header + bloom filters + count net additions
|
| 837 |
+
// - Header includes: checksum (4 bytes) + version (1 byte) + bloom len (4 bytes) + cna len (4 bytes) = 13 bytes
|
| 838 |
+
// - Bloom filters are serialized and stored inline
|
| 839 |
+
// - CNA data includes: uint64 count (8 bytes) + length indicator (4 bytes) = 12 bytes
|
| 840 |
+
//
|
| 841 |
+
// When writeMetadata is disabled:
|
| 842 |
+
// - Counts bloom filters in memory (getBloomFilterSize)
|
| 843 |
+
// - Counts .cna files separately (12 bytes each: 8 bytes data + 4 bytes checksum)
|
| 844 |
+
// - This represents the legacy behavior where metadata was stored separately
|
| 845 |
+
//
|
| 846 |
+
// The total size should be equivalent between both modes, accounting for the
|
| 847 |
+
// metadata file header overhead when writeMetadata is enabled.
|
| 848 |
+
func (sg *SegmentGroup) MetadataSize() int64 {
|
| 849 |
+
segments, release := sg.getAndLockSegments()
|
| 850 |
+
defer release()
|
| 851 |
+
|
| 852 |
+
var totalSize int64
|
| 853 |
+
for _, segment := range segments {
|
| 854 |
+
if sg.writeMetadata {
|
| 855 |
+
// When writeMetadata is enabled, count .metadata files
|
| 856 |
+
// Each .metadata file contains bloom filters + count net additions
|
| 857 |
+
if seg := segment.getSegment(); seg != nil {
|
| 858 |
+
// Check if segment has metadata file
|
| 859 |
+
metadataPath := seg.metadataPath()
|
| 860 |
+
if metadataPath != "" {
|
| 861 |
+
exists, err := fileExists(metadataPath)
|
| 862 |
+
if err == nil && exists {
|
| 863 |
+
// Get the actual file size of the metadata file
|
| 864 |
+
if info, err := os.Stat(metadataPath); err == nil {
|
| 865 |
+
totalSize += info.Size()
|
| 866 |
+
}
|
| 867 |
+
}
|
| 868 |
+
}
|
| 869 |
+
}
|
| 870 |
+
} else {
|
| 871 |
+
// When writeMetadata is disabled, count bloom filters and .cna files separately
|
| 872 |
+
if seg := segment.getSegment(); seg != nil {
|
| 873 |
+
// Count bloom filters in memory
|
| 874 |
+
if seg.bloomFilter != nil {
|
| 875 |
+
totalSize += int64(getBloomFilterSize(seg.bloomFilter))
|
| 876 |
+
}
|
| 877 |
+
// Count secondary bloom filters
|
| 878 |
+
for _, bf := range seg.secondaryBloomFilters {
|
| 879 |
+
if bf != nil {
|
| 880 |
+
totalSize += int64(getBloomFilterSize(bf))
|
| 881 |
+
}
|
| 882 |
+
}
|
| 883 |
+
}
|
| 884 |
+
|
| 885 |
+
// Count .cna files (12 bytes each)
|
| 886 |
+
if segment.getSegment().countNetPath() != "" {
|
| 887 |
+
// .cna files: uint64 count (8 bytes) + uint32 checksum (4 bytes) = 12 bytes
|
| 888 |
+
totalSize += 12
|
| 889 |
+
}
|
| 890 |
+
}
|
| 891 |
+
}
|
| 892 |
+
|
| 893 |
+
return totalSize
|
| 894 |
+
}
|
| 895 |
+
|
| 896 |
+
func (sg *SegmentGroup) shutdown(ctx context.Context) error {
|
| 897 |
+
if err := sg.compactionCallbackCtrl.Unregister(ctx); err != nil {
|
| 898 |
+
return fmt.Errorf("long-running compaction in progress: %w", ctx.Err())
|
| 899 |
+
}
|
| 900 |
+
if err := sg.segmentCleaner.close(); err != nil {
|
| 901 |
+
return err
|
| 902 |
+
}
|
| 903 |
+
|
| 904 |
+
sg.cursorsLock.Lock()
|
| 905 |
+
defer sg.cursorsLock.Unlock()
|
| 906 |
+
|
| 907 |
+
for _, seg := range sg.enqueuedSegments {
|
| 908 |
+
seg.close()
|
| 909 |
+
}
|
| 910 |
+
|
| 911 |
+
// Lock acquirement placed after compaction cycle stop request, due to occasional deadlock,
|
| 912 |
+
// because compaction logic used in cycle also requires maintenance lock.
|
| 913 |
+
//
|
| 914 |
+
// If lock is grabbed by shutdown method and compaction in cycle loop starts right after,
|
| 915 |
+
// it is blocked waiting for the same lock, eventually blocking entire cycle loop and preventing to read stop signal.
|
| 916 |
+
// If stop signal can not be read, shutdown will not receive stop result and will not proceed with further execution.
|
| 917 |
+
// Maintenance lock will then never be released.
|
| 918 |
+
sg.maintenanceLock.Lock()
|
| 919 |
+
defer sg.maintenanceLock.Unlock()
|
| 920 |
+
|
| 921 |
+
for _, seg := range sg.segments {
|
| 922 |
+
if err := seg.close(); err != nil {
|
| 923 |
+
return err
|
| 924 |
+
}
|
| 925 |
+
}
|
| 926 |
+
|
| 927 |
+
// make sure the segment list itself is set to nil. In case a memtable will
|
| 928 |
+
// still flush after closing, it might try to read from a disk segment list
|
| 929 |
+
// otherwise and run into nil-pointer problems.
|
| 930 |
+
sg.segments = nil
|
| 931 |
+
|
| 932 |
+
return nil
|
| 933 |
+
}
|
| 934 |
+
|
| 935 |
+
func (sg *SegmentGroup) UpdateStatus(status storagestate.Status) {
|
| 936 |
+
sg.statusLock.Lock()
|
| 937 |
+
defer sg.statusLock.Unlock()
|
| 938 |
+
|
| 939 |
+
sg.status = status
|
| 940 |
+
}
|
| 941 |
+
|
| 942 |
+
func (sg *SegmentGroup) isReadyOnly() bool {
|
| 943 |
+
sg.statusLock.Lock()
|
| 944 |
+
defer sg.statusLock.Unlock()
|
| 945 |
+
|
| 946 |
+
return sg.status == storagestate.StatusReadOnly
|
| 947 |
+
}
|
| 948 |
+
|
| 949 |
+
// fileExists reports whether path exists on disk. A missing file is not
// treated as an error; any other stat failure is returned to the caller.
func fileExists(path string) (bool, error) {
	switch _, err := os.Stat(path); {
	case err == nil:
		return true, nil
	case errors.Is(err, fs.ErrNotExist):
		return false, nil
	default:
		return false, err
	}
}
|
| 961 |
+
|
| 962 |
+
// segmentExistsWithID reports whether files contains a segment file for the
// given segmentID, and if so returns its file name.
func segmentExistsWithID(segmentID string, files map[string]int64) (bool, string) {
	// segment file format is "segment-{segmentID}.EXT" where EXT is either
	// - ".db" if extra infos in filename are not used
	// - ".{extra_infos}.db" if extra infos in filename are used
	prefix := "segment-" + segmentID + "."
	for name := range files {
		if strings.HasPrefix(name, prefix) && strings.HasSuffix(name, ".db") {
			return true, name
		}
	}
	return false, ""
}
|
| 974 |
+
|
| 975 |
+
// compactOrCleanup runs one maintenance pass: it attempts either a compaction
// or a cleanup, usually giving compaction precedence. It returns true if any
// work was actually performed by either operation.
func (sg *SegmentGroup) compactOrCleanup(shouldAbort cyclemanager.ShouldAbortCallback) bool {
	sg.monitorSegments()

	// compact runs a single compaction attempt, recording the call time and
	// logging failures; returns whether anything was compacted
	compact := func() bool {
		sg.lastCompactionCall = time.Now()
		compacted, err := sg.compactOnce()
		if err != nil {
			sg.logger.WithField("action", "lsm_compaction").
				WithField("path", sg.dir).
				WithError(err).
				Errorf("compaction failed")
		} else if !compacted {
			sg.logger.WithField("action", "lsm_compaction").
				WithField("path", sg.dir).
				Trace("no segments eligible for compaction")
		}
		return compacted
	}
	// cleanup runs a single cleanup attempt, recording the call time and
	// logging failures; returns whether anything was cleaned
	cleanup := func() bool {
		sg.lastCleanupCall = time.Now()
		cleaned, err := sg.segmentCleaner.cleanupOnce(shouldAbort)
		if err != nil {
			sg.logger.WithField("action", "lsm_cleanup").
				WithField("path", sg.dir).
				WithError(err).
				Errorf("cleanup failed")
		}
		return cleaned
	}

	// alternatively run compaction or cleanup first
	// if 1st one called succeeds, 2nd one is skipped, otherwise 2nd one is called as well
	//
	// compaction has the precedence over cleanup, however if cleanup
	// was not called for over [forceCleanupInterval], force at least one execution
	// in between compactions.
	// (ignore if compaction was not called within that time either)
	forceCleanupInterval := time.Hour * 12

	if time.Since(sg.lastCleanupCall) > forceCleanupInterval && sg.lastCleanupCall.Before(sg.lastCompactionCall) {
		return cleanup() || compact()
	}
	return compact() || cleanup()
}
|
| 1019 |
+
|
| 1020 |
+
func (sg *SegmentGroup) Len() int {
|
| 1021 |
+
segments, release := sg.getAndLockSegments()
|
| 1022 |
+
defer release()
|
| 1023 |
+
|
| 1024 |
+
return len(segments)
|
| 1025 |
+
}
|
| 1026 |
+
|
| 1027 |
+
func (sg *SegmentGroup) GetAveragePropertyLength() (float64, uint64) {
|
| 1028 |
+
segments, release := sg.getAndLockSegments()
|
| 1029 |
+
defer release()
|
| 1030 |
+
|
| 1031 |
+
if len(segments) == 0 {
|
| 1032 |
+
return 0, 0
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
totalDocCount := uint64(0)
|
| 1036 |
+
for _, segment := range segments {
|
| 1037 |
+
invertedData := segment.getInvertedData()
|
| 1038 |
+
totalDocCount += invertedData.avgPropertyLengthsCount
|
| 1039 |
+
}
|
| 1040 |
+
|
| 1041 |
+
if totalDocCount == 0 {
|
| 1042 |
+
return defaultAveragePropLength, 0
|
| 1043 |
+
}
|
| 1044 |
+
|
| 1045 |
+
weightedAverage := 0.0
|
| 1046 |
+
for _, segment := range segments {
|
| 1047 |
+
invertedData := segment.getInvertedData()
|
| 1048 |
+
weightedAverage += float64(invertedData.avgPropertyLengthsCount) / float64(totalDocCount) * invertedData.avgPropertyLengthsAvg
|
| 1049 |
+
}
|
| 1050 |
+
|
| 1051 |
+
return weightedAverage, totalDocCount
|
| 1052 |
+
}
|
platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup.go
ADDED
|
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// _ _
|
| 2 |
+
// __ _____ __ ___ ___ __ _| |_ ___
|
| 3 |
+
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
|
| 4 |
+
// \ V V / __/ (_| |\ V /| | (_| | || __/
|
| 5 |
+
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
|
| 6 |
+
//
|
| 7 |
+
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
|
| 8 |
+
//
|
| 9 |
+
// CONTACT: hello@weaviate.io
|
| 10 |
+
//
|
| 11 |
+
|
| 12 |
+
package lsmkv
|
| 13 |
+
|
| 14 |
+
import (
|
| 15 |
+
"encoding/binary"
|
| 16 |
+
"fmt"
|
| 17 |
+
"os"
|
| 18 |
+
"path/filepath"
|
| 19 |
+
"strconv"
|
| 20 |
+
"strings"
|
| 21 |
+
"time"
|
| 22 |
+
|
| 23 |
+
"github.com/pkg/errors"
|
| 24 |
+
|
| 25 |
+
"github.com/sirupsen/logrus"
|
| 26 |
+
"github.com/weaviate/weaviate/entities/cyclemanager"
|
| 27 |
+
"github.com/weaviate/weaviate/entities/diskio"
|
| 28 |
+
bolt "go.etcd.io/bbolt"
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
const (
	// cleanupDbFileName is the bolt db file storing cleanup progress; it lives
	// in the bucket directory, next to the segment files.
	cleanupDbFileName = "cleanup.db.bolt"
	// emptyIdx marks "no segment index" in findCandidate/readEarliestCleaned results.
	emptyIdx = -1
	// minCleanupSizePercent: a segment is re-cleaned only if the combined size
	// of the newer segments exceeds this percentage of its own size.
	minCleanupSizePercent = 10
)
|
| 36 |
+
|
| 37 |
+
var (
	// cleanupDbBucketSegments stores per-segment cleanup records (keyed by segment id).
	cleanupDbBucketSegments = []byte("segments")
	// cleanupDbBucketMeta stores global cleanup metadata.
	cleanupDbBucketMeta = []byte("meta")
	// cleanupDbKeyMetaNextAllowedTs keys the earliest timestamp at which the
	// next cleanup attempt is allowed.
	cleanupDbKeyMetaNextAllowedTs = []byte("nextAllowedTs")
)
|
| 42 |
+
|
| 43 |
+
// segmentCleaner removes redundant entries (overwritten or deleted in newer
// segments) from older segments. close releases any resources the cleaner
// holds; cleanupOnce performs a single cleanup attempt and reports whether
// anything was cleaned.
type segmentCleaner interface {
	close() error
	cleanupOnce(shouldAbort cyclemanager.ShouldAbortCallback) (cleaned bool, err error)
}
|
| 47 |
+
|
| 48 |
+
func newSegmentCleaner(sg *SegmentGroup) (segmentCleaner, error) {
|
| 49 |
+
if sg.cleanupInterval <= 0 {
|
| 50 |
+
return &segmentCleanerNoop{}, nil
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
switch sg.strategy {
|
| 54 |
+
case StrategyReplace:
|
| 55 |
+
cleaner := &segmentCleanerCommon{sg: sg}
|
| 56 |
+
if err := cleaner.init(); err != nil {
|
| 57 |
+
return nil, err
|
| 58 |
+
}
|
| 59 |
+
return cleaner, nil
|
| 60 |
+
case StrategyMapCollection,
|
| 61 |
+
StrategySetCollection,
|
| 62 |
+
StrategyRoaringSet,
|
| 63 |
+
StrategyRoaringSetRange,
|
| 64 |
+
StrategyInverted:
|
| 65 |
+
return &segmentCleanerNoop{}, nil
|
| 66 |
+
default:
|
| 67 |
+
return nil, fmt.Errorf("unrecognized strategy %q", sg.strategy)
|
| 68 |
+
}
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
// ================================================================
|
| 72 |
+
|
| 73 |
+
// segmentCleanerNoop is used when cleanup is disabled or the bucket strategy
// does not support cleanup; all operations are no-ops.
type segmentCleanerNoop struct{}

// close is a no-op and never fails.
func (c *segmentCleanerNoop) close() error {
	return nil
}

// cleanupOnce never cleans anything and never fails.
func (c *segmentCleanerNoop) cleanupOnce(shouldAbort cyclemanager.ShouldAbortCallback) (bool, error) {
	return false, nil
}
|
| 82 |
+
|
| 83 |
+
// ================================================================
|
| 84 |
+
|
| 85 |
+
// segmentCleanerCommon uses bolt db to persist data relevant to cleanup
// progress.
// db is stored in file named [cleanupDbFileName] in bucket directory, next to
// segment files.
//
// db uses 2 buckets:
//   - [cleanupDbBucketMeta] to store global cleanup data
//   - [cleanupDbBucketSegments] to store each segment's cleanup data
//
// [cleanupDbBucketMeta] holds single key [cleanupDbKeyMetaNextAllowedTs] with value of
// timestamp of earliest of last segments' cleanups or last execution timestamp of findCandidate
// if no eligible cleanup candidate was found.
// [cleanupDbBucketSegments] holds multiple keys (being segment ids) with values being combined:
//   - timestamp of current segment's cleanup
//   - segmentId of last segment used in current segment's cleanup
//   - size of current segment after cleanup
//
// Entries of segmentIds of segments that were removed (left segments after compaction)
// are regularly removed from cleanup db while next cleanup candidate is searched.
//
// cleanupInterval indicates the minimal interval that has to pass for a segment to be
// cleaned again. Each segment has its last cleanup timestamp stored in the cleanup bolt db.
// Additionally a "global" earliest cleanup timestamp is stored ([cleanupDbKeyMetaNextAllowedTs])
// or the last execution timestamp of the findCandidate method. This timestamp is used to quickly
// exit findCandidate without the necessity to verify, for each segment, whether the interval passed.
type segmentCleanerCommon struct {
	sg *SegmentGroup // owning segment group (provides dir, segments, config)
	db *bolt.DB      // bolt db persisting cleanup progress
}
|
| 113 |
+
|
| 114 |
+
func (c *segmentCleanerCommon) init() error {
|
| 115 |
+
path := filepath.Join(c.sg.dir, cleanupDbFileName)
|
| 116 |
+
var db *bolt.DB
|
| 117 |
+
var err error
|
| 118 |
+
|
| 119 |
+
if db, err = bolt.Open(path, 0o600, nil); err != nil {
|
| 120 |
+
return fmt.Errorf("open cleanup bolt db %q: %w", path, err)
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
if err = db.Update(func(tx *bolt.Tx) error {
|
| 124 |
+
if _, err := tx.CreateBucketIfNotExists(cleanupDbBucketSegments); err != nil {
|
| 125 |
+
return err
|
| 126 |
+
}
|
| 127 |
+
if _, err := tx.CreateBucketIfNotExists(cleanupDbBucketMeta); err != nil {
|
| 128 |
+
return err
|
| 129 |
+
}
|
| 130 |
+
return nil
|
| 131 |
+
}); err != nil {
|
| 132 |
+
return fmt.Errorf("create bucket cleanup bolt db %q: %w", path, err)
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
c.db = db
|
| 136 |
+
return nil
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
func (c *segmentCleanerCommon) close() error {
|
| 140 |
+
if err := c.db.Close(); err != nil {
|
| 141 |
+
return fmt.Errorf("close cleanup bolt db %q: %w", c.db.Path(), err)
|
| 142 |
+
}
|
| 143 |
+
return nil
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// findCandidate returns index of segment that should be cleaned as next one,
// index of first newer segment to start cleanup from, index of last newer segment
// to finish cleanup on, callback to be executed after cleanup is successfully completed
// and error in case issues occurred while finding the candidate.
// All indexes are [emptyIdx] (and the callback nil) when no cleanup should run.
func (c *segmentCleanerCommon) findCandidate() (int, int, int, onCompletedFunc, error) {
	nowTs := time.Now().UnixNano()
	// a segment cleaned after nextAllowedTs is still within its cooldown window
	nextAllowedTs := nowTs - int64(c.sg.cleanupInterval)
	nextAllowedStoredTs := c.readNextAllowed()

	if nextAllowedStoredTs > nextAllowedTs {
		// too soon for next cleanup
		return emptyIdx, emptyIdx, emptyIdx, nil, nil
	}

	ids, sizes, err := c.getSegmentIdsAndSizes()
	if err != nil {
		return emptyIdx, emptyIdx, emptyIdx, nil, err
	}
	if count := len(ids); count <= 1 {
		// too few segments for cleanup, update next allowed timestamp for cleanup to now
		if err := c.storeNextAllowed(nowTs); err != nil {
			return emptyIdx, emptyIdx, emptyIdx, nil, err
		}
		return emptyIdx, emptyIdx, emptyIdx, nil, nil
	}

	// get idx and cleanup timestamp of earliest cleaned segment,
	// take the opportunity to find obsolete segment keys to be deleted later from cleanup db
	candidateIdx, startIdx, lastIdx, earliestCleanedTs, nonExistentSegmentKeys := c.readEarliestCleaned(ids, sizes, nowTs)

	if err := c.deleteSegmentMetas(nonExistentSegmentKeys); err != nil {
		return emptyIdx, emptyIdx, emptyIdx, nil, err
	}

	if candidateIdx != emptyIdx && earliestCleanedTs <= nextAllowedTs {
		// candidate found and ready for cleanup
		id := ids[candidateIdx]
		lastProcessedId := ids[len(ids)-1]
		// record the cleanup result once the caller finishes successfully
		onCompleted := func(size int64) error {
			return c.storeSegmentMeta(id, lastProcessedId, size, nowTs)
		}
		return candidateIdx, startIdx, lastIdx, onCompleted, nil
	}

	// candidate not found or not ready for cleanup, update next allowed timestamp to earliest cleaned segment
	// (which is "now" if candidate was not found)
	if err := c.storeNextAllowed(earliestCleanedTs); err != nil {
		return emptyIdx, emptyIdx, emptyIdx, nil, err
	}

	return emptyIdx, emptyIdx, emptyIdx, nil, nil
}
|
| 198 |
+
|
| 199 |
+
func (c *segmentCleanerCommon) getSegmentIdsAndSizes() ([]int64, []int64, error) {
|
| 200 |
+
segments, release := c.sg.getAndLockSegments()
|
| 201 |
+
defer release()
|
| 202 |
+
|
| 203 |
+
var ids []int64
|
| 204 |
+
var sizes []int64
|
| 205 |
+
if count := len(segments); count > 1 {
|
| 206 |
+
ids = make([]int64, count)
|
| 207 |
+
sizes = make([]int64, count)
|
| 208 |
+
|
| 209 |
+
for i, seg := range segments {
|
| 210 |
+
idStr := segmentID(seg.getPath())
|
| 211 |
+
id, err := strconv.ParseInt(idStr, 10, 64)
|
| 212 |
+
if err != nil {
|
| 213 |
+
return nil, nil, fmt.Errorf("parse segment id %q: %w", idStr, err)
|
| 214 |
+
}
|
| 215 |
+
ids[i] = id
|
| 216 |
+
sizes[i] = seg.getSize()
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
return ids, sizes, nil
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
func (c *segmentCleanerCommon) readNextAllowed() int64 {
|
| 224 |
+
ts := int64(0)
|
| 225 |
+
c.db.View(func(tx *bolt.Tx) error {
|
| 226 |
+
b := tx.Bucket(cleanupDbBucketMeta)
|
| 227 |
+
v := b.Get(cleanupDbKeyMetaNextAllowedTs)
|
| 228 |
+
if v != nil {
|
| 229 |
+
ts = int64(binary.BigEndian.Uint64(v))
|
| 230 |
+
}
|
| 231 |
+
return nil
|
| 232 |
+
})
|
| 233 |
+
return ts
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
func (c *segmentCleanerCommon) storeNextAllowed(ts int64) error {
|
| 237 |
+
if err := c.db.Update(func(tx *bolt.Tx) error {
|
| 238 |
+
b := tx.Bucket(cleanupDbBucketMeta)
|
| 239 |
+
bufV := make([]byte, 8)
|
| 240 |
+
|
| 241 |
+
binary.BigEndian.PutUint64(bufV, uint64(ts))
|
| 242 |
+
return b.Put(cleanupDbKeyMetaNextAllowedTs, bufV)
|
| 243 |
+
}); err != nil {
|
| 244 |
+
return fmt.Errorf("updating cleanup bolt db %q: %w", c.db.Path(), err)
|
| 245 |
+
}
|
| 246 |
+
return nil
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
func (c *segmentCleanerCommon) deleteSegmentMetas(segIds [][]byte) error {
|
| 250 |
+
if len(segIds) > 0 {
|
| 251 |
+
if err := c.db.Update(func(tx *bolt.Tx) error {
|
| 252 |
+
b := tx.Bucket(cleanupDbBucketSegments)
|
| 253 |
+
for _, k := range segIds {
|
| 254 |
+
if err := b.Delete(k); err != nil {
|
| 255 |
+
return err
|
| 256 |
+
}
|
| 257 |
+
}
|
| 258 |
+
return nil
|
| 259 |
+
}); err != nil {
|
| 260 |
+
return fmt.Errorf("deleting from cleanup bolt db %q: %w", c.db.Path(), err)
|
| 261 |
+
}
|
| 262 |
+
}
|
| 263 |
+
return nil
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
// based on data stored in cleanup bolt db and existing segments in filesystem
// method returns:
//   - index of candidate segment best suitable for cleanup,
//   - index of segment, cleanup of candidate should be started from,
//   - index of segment, cleanup of candidate should be finished on,
//   - time of previous candidate's cleanup,
//   - list of segmentIds stored in cleanup bolt db that no longer exist in filesystem
//
// First candidate to be returned is segment that was not cleaned before (if multiple
// uncleaned segments exist - the oldest one is returned).
// If there is no unclean segment, segment that was cleaned as the earliest is returned.
// For segment already cleaned before to be returned, new segments must have been created
// after previous cleanup and sum of their sizes should be greater than [minCleanupSizePercent]
// percent of size of cleaned segment, to increase the chance of segment being actually cleaned,
// not just copied.
func (c *segmentCleanerCommon) readEarliestCleaned(ids, sizes []int64, nowTs int64,
) (int, int, int, int64, [][]byte) {
	earliestCleanedTs := nowTs
	candidateIdx := emptyIdx
	startIdx := emptyIdx
	lastIdx := emptyIdx

	count := len(ids)
	nonExistentSegmentKeys := [][]byte{}
	emptyId := int64(-1)

	c.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(cleanupDbBucketSegments)
		cur := b.Cursor()

		// Loop through all segmentIds, the ones stored in cleanup db (cur)
		// and ones currently existing in filesystem (ids).
		// Note: both sets of segmentIds may have unique elements:
		// - cursor can contain segmentIds of segments already removed (by compaction)
		// - ids can contain segmentIds of newly created segments
		// Note: both sets are ordered, therefore in case one element is missing
		// in a set, only this set advances to the next element.
		// The newest filesystem segment (idx == count-1) is intentionally
		// excluded: it can only serve as input for cleaning older segments.
		idx := 0
		key, val := cur.First()
		for idx < count-1 || key != nil {
			id := emptyId
			storedId := emptyId

			if idx < count-1 {
				id = ids[idx]
			}
			if key != nil {
				storedId = int64(binary.BigEndian.Uint64(key))
			}

			// segment with segmentId stored in cleanup db (storedId) no longer exists,
			if id == emptyId || (storedId != emptyId && id > storedId) {
				// entry to be deleted
				nonExistentSegmentKeys = append(nonExistentSegmentKeys, key)
				// advance cursor
				key, val = cur.Next()
				continue
			}

			// segment with segmentId in filesystem (id) has no entry in cleanup db,
			if storedId == emptyId || (id != emptyId && id < storedId) {
				// as segment was not cleaned before (timestamp == 0), it becomes best
				// candidate for next cleanup.
				// (if there are more segments not yet cleaned, 1st one is selected)
				if earliestCleanedTs > 0 {
					earliestCleanedTs = 0
					candidateIdx = idx
					startIdx = idx + 1
					lastIdx = count - 1
				}
				// advance index
				idx++
				continue
			}

			// segmentId present in both sets, had to be cleaned before
			// id == cid

			storedCleanedTs := int64(binary.BigEndian.Uint64(val[0:8]))
			// check if cleaned before current candidate
			if earliestCleanedTs > storedCleanedTs {
				lastId := ids[count-1]
				storedLastId := int64(binary.BigEndian.Uint64(val[8:16]))
				// check if new segments created after last cleanup
				if storedLastId < lastId {
					// last segment's id in filesystem is higher than last id used for cleanup
					size := sizes[idx]
					storedSize := int64(binary.BigEndian.Uint64(val[16:24]))

					// In general segment could be cleaned considering only segments created
					// after its last cleanup. One exception is when segment was compacted
					// (previous and current sizes differ).
					// As after compaction cleanup db will contain only entry of right segment,
					// not the left one, it is unknown what was last segment used for cleanup of removed
					// left segment, therefore compacted segment will be cleaned again using all newer segments.
					possibleStartIdx := idx + 1
					// in case of using segments that were already used for cleanup, process them in reverse
					// order starting with newest ones, to maximize the chance of finding redundant entries
					// as soon as possible (leaving segments that were already used for cleanup as last ones)
					reverseOrder := true
					if size == storedSize {
						reverseOrder = false
						// size not changed (not compacted), clean using only newly created segments,
						// skipping segments already processed in previous cleanup
						for i := idx + 1; i < count; i++ {
							possibleStartIdx = i
							if ids[i] > storedLastId {
								break
							}
						}
					}

					// segment should be cleaned only if sum of sizes of segments to be cleaned
					// with exceeds [minCleanupSizePercent] of its current size, to increase
					// probability of redundant keys.
					sumSize := int64(0)
					for i := possibleStartIdx; i < count; i++ {
						sumSize += sizes[i]
					}
					if size*minCleanupSizePercent/100 <= sumSize {
						earliestCleanedTs = storedCleanedTs
						candidateIdx = idx
						startIdx = possibleStartIdx
						lastIdx = count - 1

						// reversed processing is signaled by startIdx > lastIdx
						if reverseOrder {
							startIdx, lastIdx = lastIdx, startIdx
						}
					}
				}
			}
			// advance cursor and index
			key, val = cur.Next()
			idx++
		}
		return nil
	})
	return candidateIdx, startIdx, lastIdx, earliestCleanedTs, nonExistentSegmentKeys
}
|
| 406 |
+
|
| 407 |
+
func (c *segmentCleanerCommon) storeSegmentMeta(id, lastProcessedId, size, cleanedTs int64) error {
|
| 408 |
+
bufK := make([]byte, 8)
|
| 409 |
+
binary.BigEndian.PutUint64(bufK, uint64(id))
|
| 410 |
+
|
| 411 |
+
bufV := make([]byte, 24)
|
| 412 |
+
binary.BigEndian.PutUint64(bufV[0:8], uint64(cleanedTs))
|
| 413 |
+
binary.BigEndian.PutUint64(bufV[8:16], uint64(lastProcessedId))
|
| 414 |
+
binary.BigEndian.PutUint64(bufV[16:24], uint64(size))
|
| 415 |
+
|
| 416 |
+
if err := c.db.Update(func(tx *bolt.Tx) error {
|
| 417 |
+
b := tx.Bucket(cleanupDbBucketSegments)
|
| 418 |
+
return b.Put(bufK, bufV)
|
| 419 |
+
}); err != nil {
|
| 420 |
+
return fmt.Errorf("updating cleanup bolt db %q: %w", c.db.Path(), err)
|
| 421 |
+
}
|
| 422 |
+
return nil
|
| 423 |
+
}
|
| 424 |
+
|
| 425 |
+
func (c *segmentCleanerCommon) cleanupOnce(shouldAbort cyclemanager.ShouldAbortCallback,
|
| 426 |
+
) (bool, error) {
|
| 427 |
+
if c.sg.isReadyOnly() {
|
| 428 |
+
return false, nil
|
| 429 |
+
}
|
| 430 |
+
|
| 431 |
+
var err error
|
| 432 |
+
candidateIdx, startIdx, lastIdx, onCompleted, err := c.findCandidate()
|
| 433 |
+
if err != nil {
|
| 434 |
+
return false, err
|
| 435 |
+
}
|
| 436 |
+
if candidateIdx == emptyIdx {
|
| 437 |
+
return false, nil
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
if c.sg.allocChecker != nil {
|
| 441 |
+
// allocChecker is optional
|
| 442 |
+
if err := c.sg.allocChecker.CheckAlloc(100 * 1024 * 1024); err != nil {
|
| 443 |
+
// if we don't have at least 100MB to spare, don't start a cleanup. A
|
| 444 |
+
// cleanup does not actually need a 100MB, but it will create garbage
|
| 445 |
+
// that needs to be cleaned up. If we're so close to the memory limit, we
|
| 446 |
+
// can increase stability by preventing anything that's not strictly
|
| 447 |
+
// necessary. Cleanup can simply resume when the cluster has been
|
| 448 |
+
// scaled.
|
| 449 |
+
c.sg.logger.WithFields(logrus.Fields{
|
| 450 |
+
"action": "lsm_cleanup",
|
| 451 |
+
"event": "cleanup_skipped_oom",
|
| 452 |
+
"path": c.sg.dir,
|
| 453 |
+
}).WithError(err).
|
| 454 |
+
Warnf("skipping cleanup due to memory pressure")
|
| 455 |
+
|
| 456 |
+
return false, nil
|
| 457 |
+
}
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
if shouldAbort() {
|
| 461 |
+
c.sg.logger.WithFields(logrus.Fields{
|
| 462 |
+
"action": "lsm_cleanup",
|
| 463 |
+
"path": c.sg.dir,
|
| 464 |
+
}).Warnf("skipping cleanup due to shouldAbort")
|
| 465 |
+
return false, nil
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
oldSegment := c.sg.segmentAtPos(candidateIdx)
|
| 469 |
+
segmentId := segmentID(oldSegment.path)
|
| 470 |
+
tmpSegmentPath := filepath.Join(c.sg.dir, "segment-"+segmentId+segmentExtraInfo(oldSegment.level, oldSegment.strategy)+".db.tmp")
|
| 471 |
+
scratchSpacePath := oldSegment.path + "cleanup.scratch.d"
|
| 472 |
+
|
| 473 |
+
start := time.Now()
|
| 474 |
+
c.sg.logger.WithFields(logrus.Fields{
|
| 475 |
+
"action": "lsm_cleanup",
|
| 476 |
+
"path": c.sg.dir,
|
| 477 |
+
"candidateIdx": candidateIdx,
|
| 478 |
+
"startIdx": startIdx,
|
| 479 |
+
"lastIdx": lastIdx,
|
| 480 |
+
"segmentId": segmentId,
|
| 481 |
+
}).Info("cleanup started with candidate")
|
| 482 |
+
defer func() {
|
| 483 |
+
l := c.sg.logger.WithFields(logrus.Fields{
|
| 484 |
+
"action": "lsm_cleanup",
|
| 485 |
+
"path": c.sg.dir,
|
| 486 |
+
"segmentId": segmentId,
|
| 487 |
+
"took": time.Since(start),
|
| 488 |
+
})
|
| 489 |
+
if err == nil {
|
| 490 |
+
l.Info("clenaup finished")
|
| 491 |
+
} else {
|
| 492 |
+
l.WithError(err).Error("cleanup failed")
|
| 493 |
+
}
|
| 494 |
+
}()
|
| 495 |
+
|
| 496 |
+
file, err := os.Create(tmpSegmentPath)
|
| 497 |
+
if err != nil {
|
| 498 |
+
return false, err
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
switch c.sg.strategy {
|
| 502 |
+
case StrategyReplace:
|
| 503 |
+
c := newSegmentCleanerReplace(file, oldSegment.newCursor(),
|
| 504 |
+
c.sg.makeKeyExistsOnUpperSegments(startIdx, lastIdx), oldSegment.level,
|
| 505 |
+
oldSegment.secondaryIndexCount, scratchSpacePath, c.sg.enableChecksumValidation)
|
| 506 |
+
if err = c.do(shouldAbort); err != nil {
|
| 507 |
+
return false, err
|
| 508 |
+
}
|
| 509 |
+
default:
|
| 510 |
+
err = fmt.Errorf("unsported strategy %q", c.sg.strategy)
|
| 511 |
+
return false, err
|
| 512 |
+
}
|
| 513 |
+
|
| 514 |
+
if err = file.Sync(); err != nil {
|
| 515 |
+
err = fmt.Errorf("fsync cleaned segment file: %w", err)
|
| 516 |
+
return false, err
|
| 517 |
+
}
|
| 518 |
+
if err = file.Close(); err != nil {
|
| 519 |
+
err = fmt.Errorf("close cleaned segment file: %w", err)
|
| 520 |
+
return false, err
|
| 521 |
+
}
|
| 522 |
+
|
| 523 |
+
segment, err := c.sg.replaceSegment(candidateIdx, tmpSegmentPath)
|
| 524 |
+
if err != nil {
|
| 525 |
+
err = fmt.Errorf("replace compacted segments: %w", err)
|
| 526 |
+
return false, err
|
| 527 |
+
}
|
| 528 |
+
if err = onCompleted(segment.size); err != nil {
|
| 529 |
+
err = fmt.Errorf("callback cleaned segment file: %w", err)
|
| 530 |
+
return false, err
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
return true, nil
|
| 534 |
+
}
|
| 535 |
+
|
| 536 |
+
// onCompletedFunc is the callback invoked once a cleanup pass has produced a
// new segment; it receives the size of the freshly written segment file
// (cleanupOnce calls it with segment.size after a successful replace).
type onCompletedFunc func(size int64) error
|
| 537 |
+
|
| 538 |
+
// ================================================================
|
| 539 |
+
|
| 540 |
+
// keyExistsOnUpperSegmentsFunc reports whether key is present in any segment
// of a given index range "above" the segment currently being cleaned — see
// makeKeyExistsOnUpperSegments, which builds the canonical implementation.
type keyExistsOnUpperSegmentsFunc func(key []byte) (bool, error)
|
| 541 |
+
|
| 542 |
+
func (sg *SegmentGroup) makeKeyExistsOnUpperSegments(startIdx, lastIdx int) keyExistsOnUpperSegmentsFunc {
|
| 543 |
+
return func(key []byte) (bool, error) {
|
| 544 |
+
// asc order by default
|
| 545 |
+
i := startIdx
|
| 546 |
+
updateI := func() { i++ }
|
| 547 |
+
if startIdx > lastIdx {
|
| 548 |
+
// dest order
|
| 549 |
+
i = lastIdx
|
| 550 |
+
updateI = func() { i-- }
|
| 551 |
+
}
|
| 552 |
+
|
| 553 |
+
segAtPos := func() *segment {
|
| 554 |
+
segments, release := sg.getAndLockSegments()
|
| 555 |
+
defer release()
|
| 556 |
+
|
| 557 |
+
if i >= startIdx && i <= lastIdx {
|
| 558 |
+
j := i
|
| 559 |
+
updateI()
|
| 560 |
+
return segments[j].getSegment()
|
| 561 |
+
}
|
| 562 |
+
return nil
|
| 563 |
+
}
|
| 564 |
+
|
| 565 |
+
for seg := segAtPos(); seg != nil; seg = segAtPos() {
|
| 566 |
+
if exists, err := seg.exists(key); err != nil {
|
| 567 |
+
return false, err
|
| 568 |
+
} else if exists {
|
| 569 |
+
return true, nil
|
| 570 |
+
}
|
| 571 |
+
}
|
| 572 |
+
return false, nil
|
| 573 |
+
}
|
| 574 |
+
}
|
| 575 |
+
|
| 576 |
+
func (sg *SegmentGroup) replaceSegment(segmentIdx int, tmpSegmentPath string,
|
| 577 |
+
) (*segment, error) {
|
| 578 |
+
oldSegment := sg.segmentAtPos(segmentIdx)
|
| 579 |
+
countNetAdditions := oldSegment.countNetAdditions
|
| 580 |
+
|
| 581 |
+
// as a guardrail validate that the segment is considered a .tmp segment.
|
| 582 |
+
// This way we can be sure that we're not accidentally operating on a live
|
| 583 |
+
// segment as the segment group completely ignores .tmp segment files
|
| 584 |
+
if !strings.HasSuffix(tmpSegmentPath, ".tmp") {
|
| 585 |
+
return nil, fmt.Errorf("pre computing a segment expects a .tmp segment path")
|
| 586 |
+
}
|
| 587 |
+
|
| 588 |
+
seg, err := newSegment(tmpSegmentPath, sg.logger, sg.metrics, nil,
|
| 589 |
+
segmentConfig{
|
| 590 |
+
mmapContents: sg.mmapContents,
|
| 591 |
+
useBloomFilter: sg.useBloomFilter,
|
| 592 |
+
calcCountNetAdditions: sg.calcCountNetAdditions,
|
| 593 |
+
overwriteDerived: true,
|
| 594 |
+
enableChecksumValidation: sg.enableChecksumValidation,
|
| 595 |
+
MinMMapSize: sg.MinMMapSize,
|
| 596 |
+
allocChecker: sg.allocChecker,
|
| 597 |
+
precomputedCountNetAdditions: &countNetAdditions,
|
| 598 |
+
writeMetadata: sg.writeMetadata,
|
| 599 |
+
})
|
| 600 |
+
if err != nil {
|
| 601 |
+
return nil, fmt.Errorf("precompute segment meta: %w", err)
|
| 602 |
+
}
|
| 603 |
+
|
| 604 |
+
newSegment, err := sg.replaceSegmentBlocking(segmentIdx, oldSegment, seg)
|
| 605 |
+
if err != nil {
|
| 606 |
+
return nil, fmt.Errorf("replace segment (blocking): %w", err)
|
| 607 |
+
}
|
| 608 |
+
|
| 609 |
+
if err := sg.deleteOldSegmentsNonBlocking(oldSegment); err != nil {
|
| 610 |
+
// don't abort if the delete fails, we can still continue (albeit
|
| 611 |
+
// without freeing disk space that should have been freed). The
|
| 612 |
+
// compaction itself was successful.
|
| 613 |
+
sg.logger.WithError(err).WithFields(logrus.Fields{
|
| 614 |
+
"action": "lsm_replace_segments_delete_file",
|
| 615 |
+
"file": oldSegment.path,
|
| 616 |
+
}).Error("failed to delete file already marked for deletion")
|
| 617 |
+
}
|
| 618 |
+
|
| 619 |
+
return newSegment, nil
|
| 620 |
+
}
|
| 621 |
+
|
| 622 |
+
func (sg *SegmentGroup) replaceSegmentBlocking(
|
| 623 |
+
segmentIdx int, oldSegment *segment, newSegment *segment,
|
| 624 |
+
) (*segment, error) {
|
| 625 |
+
sg.maintenanceLock.Lock()
|
| 626 |
+
defer sg.maintenanceLock.Unlock()
|
| 627 |
+
|
| 628 |
+
start := time.Now()
|
| 629 |
+
|
| 630 |
+
if err := oldSegment.close(); err != nil {
|
| 631 |
+
return nil, fmt.Errorf("close disk segment %q: %w", oldSegment.path, err)
|
| 632 |
+
}
|
| 633 |
+
if err := oldSegment.markForDeletion(); err != nil {
|
| 634 |
+
return nil, fmt.Errorf("drop disk segment %q: %w", oldSegment.path, err)
|
| 635 |
+
}
|
| 636 |
+
if err := diskio.Fsync(sg.dir); err != nil {
|
| 637 |
+
return nil, fmt.Errorf("fsync segment directory %q: %w", sg.dir, err)
|
| 638 |
+
}
|
| 639 |
+
|
| 640 |
+
segmentId := segmentID(oldSegment.path)
|
| 641 |
+
newPath, err := sg.stripTmpExtension(newSegment.path, segmentId, segmentId)
|
| 642 |
+
if err != nil {
|
| 643 |
+
return nil, errors.Wrap(err, "strip .tmp extension of new segment")
|
| 644 |
+
}
|
| 645 |
+
newSegment.path = newPath
|
| 646 |
+
|
| 647 |
+
// the old segment have been deleted, we can now safely remove the .tmp
|
| 648 |
+
// extension from the new segment itself and the pre-computed files
|
| 649 |
+
for i, tmpPath := range newSegment.metaPaths {
|
| 650 |
+
path, err := sg.stripTmpExtension(tmpPath, segmentId, segmentId)
|
| 651 |
+
if err != nil {
|
| 652 |
+
return nil, fmt.Errorf("strip .tmp extension of new segment %q: %w", tmpPath, err)
|
| 653 |
+
}
|
| 654 |
+
newSegment.metaPaths[i] = path
|
| 655 |
+
}
|
| 656 |
+
|
| 657 |
+
sg.segments[segmentIdx] = newSegment
|
| 658 |
+
|
| 659 |
+
sg.observeReplaceDuration(start, segmentIdx, oldSegment, newSegment)
|
| 660 |
+
return newSegment, nil
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
func (sg *SegmentGroup) observeReplaceDuration(
|
| 664 |
+
start time.Time, segmentIdx int, oldSegment, newSegment *segment,
|
| 665 |
+
) {
|
| 666 |
+
// observe duration - warn if it took too long
|
| 667 |
+
took := time.Since(start)
|
| 668 |
+
fields := sg.logger.WithFields(logrus.Fields{
|
| 669 |
+
"action": "lsm_replace_segment_blocking",
|
| 670 |
+
"segment_index": segmentIdx,
|
| 671 |
+
"path_old": oldSegment.path,
|
| 672 |
+
"path_new": newSegment.path,
|
| 673 |
+
"took": took,
|
| 674 |
+
})
|
| 675 |
+
msg := fmt.Sprintf("replacing segment took %s", took)
|
| 676 |
+
if took > replaceSegmentWarnThreshold {
|
| 677 |
+
fields.Warn(msg)
|
| 678 |
+
} else {
|
| 679 |
+
fields.Debug(msg)
|
| 680 |
+
}
|
| 681 |
+
}
|